Prepare a design report on the mobile robot design for the assigned application. Following your microcontroller selection for this application, you need to develop the external connections that will be made to its hardware. Produce the following:
· a description of the specifications, features and functions of the mobile robot application
· pin assignments for inputs and outputs (an illustrative sketch is given after this list)
· a simple design for the circuitry, sensors and actuators suited to your application
· the application of the design in a physical context to check and critically evaluate its functionality and effective interfacing
· a record of any adaptations required as the prototype develops and is taken towards a commercial solution/product
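As an illustration of the pin-assignment deliverable, the sketch below records one possible input/output mapping for a differential-drive mobile robot. The board, pin numbers and signal names are hypothetical placeholders and would be replaced with those of the microcontroller actually selected; it is written as a Python mapping for consistency with the controller code later in this report.

# hypothetical pin map for a differential-drive mobile robot (placeholder values only)
PIN_ASSIGNMENTS = {
    # outputs
    "left_motor_pwm"  : 3,   # PWM output to the left motor driver
    "right_motor_pwm" : 5,   # PWM output to the right motor driver
    "left_motor_dir"  : 4,   # direction line for the left motor driver
    "right_motor_dir" : 7,   # direction line for the right motor driver
    # inputs
    "left_encoder_a"  : 2,   # left wheel encoder channel A (ideally interrupt-capable)
    "right_encoder_a" : 8,   # right wheel encoder channel A (ideally interrupt-capable)
    "proximity_adc_0" : 14,  # analogue input from the front proximity sensor
    "proximity_adc_1" : 15,  # analogue input from a side proximity sensor
}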
# update the estimated position of the robot using its wheel encoder readings
def _update_odometry( self ):
    R = self.robot_wheel_radius
    N = float( self.wheel_encoder_ticks_per_revolution )

    # read the wheel encoder values
    ticks_left, ticks_right = self.robot.read_wheel_encoders()

    # get the difference in ticks since the last iteration
    d_ticks_left = ticks_left - self.prev_ticks_left
    d_ticks_right = ticks_right - self.prev_ticks_right

    # estimate the wheel movements
    d_left_wheel = 2*pi*R*( d_ticks_left / N )
    d_right_wheel = 2*pi*R*( d_ticks_right / N )
    d_center = 0.5 * ( d_left_wheel + d_right_wheel )

    # calculate the new pose
    prev_x, prev_y, prev_theta = self.estimated_pose.scalar_unpack()
    new_x = prev_x + ( d_center * cos( prev_theta ) )
    new_y = prev_y + ( d_center * sin( prev_theta ) )
    new_theta = prev_theta + ( ( d_right_wheel - d_left_wheel ) / self.robot_wheel_base_length )

    # update the pose estimate with the new values
    self.estimated_pose.scalar_update( new_x, new_y, new_theta )

    # save the current tick count for the next iteration
    self.prev_ticks_left = ticks_left
    self.prev_ticks_right = ticks_right
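To check the odometry update in isolation, the short sketch below re-implements the same differential-drive equations as a standalone function and advances a pose with made-up encoder counts. The wheel radius, ticks per revolution and wheel-base values are illustrative assumptions, not measurements from the actual robot.

from math import pi, cos, sin

# standalone version of the same odometry equations (illustrative parameter values)
def advance_pose( pose, d_ticks_left, d_ticks_right,
                  R=0.02, N=384.0, wheel_base=0.09 ):
    x, y, theta = pose
    d_left_wheel  = 2*pi*R*( d_ticks_left / N )
    d_right_wheel = 2*pi*R*( d_ticks_right / N )
    d_center = 0.5 * ( d_left_wheel + d_right_wheel )
    return ( x + d_center * cos( theta ),
             y + d_center * sin( theta ),
             theta + ( d_right_wheel - d_left_wheel ) / wheel_base )

# example: the right wheel turning slightly faster than the left gives a gentle left turn
pose = ( 0.0, 0.0, 0.0 )
for _ in range( 10 ):
    pose = advance_pose( pose, 20, 24 )
print( pose )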
# sensor gains (weights)
self.sensor_gains = [ 1.0 + ( (0.4*abs(p.theta)) / pi )
                      for p in supervisor.proximity_sensor_placements() ]
# ...
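The gain expression above weights each sensor by how far its mounting angle sits from the robot's forward axis, so readings from side- and rear-facing sensors push slightly harder on the avoidance vector. A quick check with hypothetical mounting angles (not the robot's actual sensor layout):

from math import pi

# hypothetical mounting angles (radians), for illustration only
mounting_angles = [ -pi/2, -pi/4, 0.0, pi/4, pi/2, pi ]
gains = [ 1.0 + ( 0.4*abs( theta ) ) / pi for theta in mounting_angles ]
print( gains )  # forward-facing sensor -> 1.0, rear-facing sensor -> 1.4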
# return an obstacle avoidance vector in the robot's reference frame
# also returns vectors to detected obstacles in the robot's reference frame
def calculate_ao_heading_vector( self ):
    # initialize vectors
    obstacle_vectors = [ [ 0.0, 0.0 ] ] * len( self.proximity_sensor_placements )
    ao_heading_vector = [ 0.0, 0.0 ]

    # get the distances indicated by the robot's sensor readings
    sensor_distances = self.supervisor.proximity_sensor_distances()

    # calculate the position of detected obstacles and find an avoidance vector
    robot_pos, robot_theta = self.supervisor.estimated_pose().vector_unpack()
    for i in range( len( sensor_distances ) ):
        # calculate the position of the obstacle
        sensor_pos, sensor_theta = self.proximity_sensor_placements[i].vector_unpack()
        vector = [ sensor_distances[i], 0.0 ]
        vector = linalg.rotate_and_translate_vector( vector, sensor_theta, sensor_pos )
        obstacle_vectors[i] = vector  # store the obstacle vectors in the robot's reference frame

        # accumulate the heading vector within the robot's reference frame
        ao_heading_vector = linalg.add( ao_heading_vector,
                                        linalg.scale( vector, self.sensor_gains[i] ) )

    return ao_heading_vector, obstacle_vectors
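One common way to use the returned vector, not shown in the listing above, is to steer towards its direction in the robot's frame. The helper below is an assumed caller written only to illustrate this; the controller object and its method name are taken from the listing.

from math import atan2

# hypothetical caller: convert the avoidance vector into a desired heading angle
def ao_heading_angle( controller ):
    ao_vector, _obstacle_vectors = controller.calculate_ao_heading_vector()
    return atan2( ao_vector[1], ao_vector[0] )  # radians, in the robot's reference frame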