过程:
def motor_init():
    """Wrap the two motor pins in 100 Hz software-PWM objects and start them at 0% duty.

    NOTE(review): this rebinds the pin-number globals ``l_motor``/``r_motor``
    to GPIO.PWM objects, so direction_init() (which needs the pin numbers)
    must run before motor_init() — init() below honours that order.
    """
    global l_motor, r_motor
    l_motor = GPIO.PWM(l_motor, 100)
    r_motor = GPIO.PWM(r_motor, 100)
    l_motor.start(0)
    r_motor.start(0)


def direction_init():
    """Configure all six motor-control pins (direction + enable) as outputs."""
    GPIO.setup(left_back, GPIO.OUT)
    GPIO.setup(left_front, GPIO.OUT)
    GPIO.setup(l_motor, GPIO.OUT)
    GPIO.setup(right_front, GPIO.OUT)
    GPIO.setup(right_back, GPIO.OUT)
    GPIO.setup(r_motor, GPIO.OUT)


def rvo_init():
    """Create the PCA9685 16-channel servo driver (I2C) and expose it as a global."""
    global pwm_rvo
    pwm_rvo = Adafruit_PCA9685.PCA9685()


def init():
    """One-shot hardware initialisation: GPIO mode, pin directions, servo driver, motor PWM."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)  # pins are addressed by BCM (SoC) numbers
    direction_init()
    rvo_init()
    motor_init()
def front(speed):
    """Drive both wheels forward at ``speed`` (PWM duty cycle, 0–100)."""
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 1)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 1)
    GPIO.output(right_back, 0)


def back(speed):
    """Drive both wheels backward at ``speed`` (PWM duty cycle, 0–100)."""
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 1)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 1)


def left(speed):
    """Spin-turn left: left wheel backward, right wheel forward."""
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 1)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 1)
    GPIO.output(right_back, 0)


def right(speed):
    """Spin-turn right: left wheel forward, right wheel backward."""
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 1)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 1)


def stop():
    """Cut PWM to 0 and drop all direction pins — the car coasts to a halt."""
    l_motor.ChangeDutyCycle(0)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(0)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 0)
def t_rvo_angle(channel, angle):
    """Point the servo on PCA9685 ``channel`` at ``angle`` degrees (0–180).

    The PCA9685 counts 4096 ticks per PWM period.  At 50 Hz one period is
    20000 µs, and a hobby servo maps roughly 500–2500 µs of pulse width to
    its 0–180° range, hence pulse_us = angle * 11 + 500 (≈ 2000/180 per
    degree) converted to ticks by * 4096 / 20000.
    """
    angle = 4096 * ((angle * 11) + 500) / 20000
    pwm_rvo.set_pwm_freq(50)  # standard 50 Hz servo frame rate
    pwm_rvo.set_pwm(channel, 0, int(angle))
t_rvo_angle(4, 110)  # top servo, lengthwise (camera tilt): 0 = back, 180 = front
t_rvo_angle(5, 90)   # bottom servo, crosswise (camera pan): 0 = left, 180 = right
上面的(4):是顶部的舵机(摄像头上下摆动的那个舵机)
下面的(5):是底部的舵机(摄像头左右摆动的那个舵机)
# step 1: image processing — grab a frame and extract contours
img, contours = image_processing()
width, height = 160, 120          # small frame: better real-time performance
camera = cv2.VideoCapture(0)
camera.set(3, width)              # 3 == CAP_PROP_FRAME_WIDTH
camera.set(4, height)             # 4 == CAP_PROP_FRAME_HEIGHT
打开摄像头,并设置窗口大小。
设置小窗口的原因: 小窗口实时性比较好。
# capture one frame
ret, frame = camera.read()

# convert to grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)

# Gaussian blur to suppress noise before thresholding
blur = cv2.GaussianBlur(gray, (5, 5), 0)

# brighten: out = blur * 1.5 + 30
blur = cv2.convertScaleAbs(blur, None, 1.5, 30)

# inverted binary threshold: a dark object on a light floor becomes white
ret, binary = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('binary', binary)

# morphological close with a 17x17 rectangle to fill holes in the blob
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))
close = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
cv2.imshow('close', close)

# extract contours (OpenCV 3.x findContours returns 3 values; mode 1 == RETR_LIST)
binary_c, contours, hierarchy = cv2.findContours(close, 1, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(image, contours, -1, (255, 0, 255), 2)
cv2.imshow('image', image)
def image_processing():
    """Grab one camera frame, segment the darkest blob, and return (frame, contours).

    Pipeline: gray → Gaussian blur → brighten → inverted threshold →
    morphological close → findContours.  Intermediate stages are shown in
    debug windows via cv2.imshow.
    """
    ret, frame = camera.read()
    image = frame                       # drawn on below; note: aliases frame, not a copy
    cv2.imshow('frame', frame)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', gray)

    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    blur = cv2.convertScaleAbs(blur, None, 1.5, 30)  # brighten: blur * 1.5 + 30

    # inverted threshold: dark object on light background becomes white
    ret, binary = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY_INV)
    cv2.imshow('binary', binary)

    # close small holes so the object forms one solid contour
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))
    close = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    cv2.imshow('close', close)

    # OpenCV 3.x findContours returns 3 values; mode 1 == RETR_LIST
    binary_c, contours, hierarchy = cv2.findContours(close, 1, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(image, contours, -1, (255, 0, 255), 2)
    cv2.imshow('image', image)
    return frame, contours
由于有可能出现多个物体,我们这里只识别最大的物体(深度学习可以搞分类,还没学到这,学到了再做),得到它的坐标。
# step 2: get the centroid coordinates of the largest contour
x, y = get_coord(img, contours)
def get_coord(img, contours):
    """Return the centroid (x, y) of the largest contour, or (0, 0) when none exists.

    Only the biggest contour is tracked (no classification yet), so the car
    follows whichever object produces the largest blob.
    """
    image = img.copy()
    try:
        contour = max(contours, key=cv2.contourArea)
        cv2.drawContours(image, contour, -1, (255, 0, 255), 2)
        cv2.imshow('new_frame', image)
        # centroid from image moments: (m10/m00, m01/m00)
        m = cv2.moments(contour)
        x = int(m['m10'] / m['m00'])
        y = int(m['m01'] / m['m00'])
        print(x, y)
        return x, y
    except (ValueError, ZeroDivisionError):
        # ValueError: contour list empty; ZeroDivisionError: degenerate contour (m00 == 0)
        print('no objects')
        return 0, 0
返回最大轮廓的坐标:
根据反馈回来的坐标,判断它的位置,进行运动。
# step 3: steer the car toward the tracked centroid
move(x, y)
# (0, 0) is the sentinel for "no object detected" — stop the car
if x == 0 and y == 0:
    stop()
识别到物体,且在正中央(中间1/2区域),让物体向前走。
#go ahead elif width/4 <x and x<(width-width/4): front(70)
物体在左边1/4区域。
#left elif x < width/4: left(50)
物体在右边1/4区域。
#right elif x > (width-width/4): right(50)
def move(x, y):
    """Steer toward the tracked centroid x; (0, 0) means "no object" → stop.

    Frame is split into quarters: middle half → forward, left quarter →
    turn left, right quarter → turn right.  y is currently unused.
    """
    global cond
    if x == 0 and y == 0:
        stop()                                   # sentinel from get_coord: nothing found
    elif width / 4 < x and x < (width - width / 4):
        front(70)                                # roughly centred: go ahead
    elif x < width / 4:
        left(50)                                 # object in left quarter
    elif x > (width - width / 4):
        right(50)                                # object in right quarter
# object tracking — Raspberry Pi vision car
import time

import cv2
import numpy as np
import RPi.GPIO as GPIO
import Adafruit_PCA9685

cond = 0

# small frame: better real-time performance on the Pi
width, height = 160, 120
camera = cv2.VideoCapture(0)
camera.set(3, width)    # 3 == CAP_PROP_FRAME_WIDTH
camera.set(4, height)   # 4 == CAP_PROP_FRAME_HEIGHT

# BCM pin assignments (rebound to PWM objects by motor_init)
l_motor = 18
left_front = 22
left_back = 27
r_motor = 23
right_front = 25
right_back = 24


def motor_init():
    """Wrap the two motor pins in 100 Hz software-PWM objects, started at 0% duty."""
    global l_motor, r_motor
    l_motor = GPIO.PWM(l_motor, 100)
    r_motor = GPIO.PWM(r_motor, 100)
    l_motor.start(0)
    r_motor.start(0)


def direction_init():
    """Configure all six motor-control pins as outputs."""
    GPIO.setup(left_back, GPIO.OUT)
    GPIO.setup(left_front, GPIO.OUT)
    GPIO.setup(l_motor, GPIO.OUT)
    GPIO.setup(right_front, GPIO.OUT)
    GPIO.setup(right_back, GPIO.OUT)
    GPIO.setup(r_motor, GPIO.OUT)


def rvo_init():
    """Create the PCA9685 servo driver (I2C) and expose it as a global."""
    global pwm_rvo
    pwm_rvo = Adafruit_PCA9685.PCA9685()


def init():
    """One-shot hardware initialisation: GPIO mode, pin directions, servos, motor PWM."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    direction_init()
    rvo_init()
    motor_init()


def front(speed):
    """Drive both wheels forward at ``speed`` (PWM duty cycle, 0–100)."""
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 1)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 1)
    GPIO.output(right_back, 0)


def back(speed):
    """Drive both wheels backward at ``speed``."""
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 1)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 1)


def left(speed):
    """Spin-turn left: left wheel backward, right wheel forward."""
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 1)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 1)
    GPIO.output(right_back, 0)


def right(speed):
    """Spin-turn right: left wheel forward, right wheel backward."""
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 1)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 1)


def stop():
    """Cut PWM to 0 and drop all direction pins."""
    l_motor.ChangeDutyCycle(0)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(0)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 0)


def t_rvo_angle(channel, angle):
    """Point the servo on PCA9685 ``channel`` at ``angle`` degrees (0–180).

    4096 ticks per 20000 µs period at 50 Hz; pulse_us ≈ angle * 11 + 500
    maps 0–180° onto the 500–2500 µs servo pulse range.
    """
    angle = 4096 * ((angle * 11) + 500) / 20000
    pwm_rvo.set_pwm_freq(50)  # standard 50 Hz servo frame rate
    pwm_rvo.set_pwm(channel, 0, int(angle))


def image_processing():
    """Grab one camera frame, segment the darkest blob, return (frame, contours)."""
    ret, frame = camera.read()
    image = frame
    cv2.imshow('frame', frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', gray)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    blur = cv2.convertScaleAbs(blur, None, 1.5, 30)  # brighten: blur * 1.5 + 30
    # inverted threshold: dark object on light background becomes white
    ret, binary = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY_INV)
    cv2.imshow('binary', binary)
    # close small holes so the object forms one solid contour
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))
    close = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    cv2.imshow('close', close)
    # OpenCV 3.x findContours returns 3 values; mode 1 == RETR_LIST
    binary_c, contours, hierarchy = cv2.findContours(close, 1, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(image, contours, -1, (255, 0, 255), 2)
    cv2.imshow('image', image)
    return frame, contours


def get_coord(img, contours):
    """Return the centroid (x, y) of the largest contour, or (0, 0) when none exists."""
    image = img.copy()
    try:
        contour = max(contours, key=cv2.contourArea)
        cv2.drawContours(image, contour, -1, (255, 0, 255), 2)
        cv2.imshow('new_frame', image)
        # centroid from image moments: (m10/m00, m01/m00)
        m = cv2.moments(contour)
        x = int(m['m10'] / m['m00'])
        y = int(m['m01'] / m['m00'])
        print(x, y)
        return x, y
    except (ValueError, ZeroDivisionError):
        # ValueError: no contours; ZeroDivisionError: degenerate contour (m00 == 0)
        print('no objects')
        return 0, 0


def move(x, y):
    """Steer toward the tracked centroid x; (0, 0) means "no object" → stop."""
    global cond
    if x == 0 and y == 0:
        stop()
    elif width / 4 < x and x < (width - width / 4):
        front(70)   # roughly centred: go ahead
    elif x < width / 4:
        left(50)    # object in left quarter
    elif x > (width - width / 4):
        right(50)   # object in right quarter


if __name__ == '__main__':
    init()
    t_rvo_angle(4, 110)  # top servo, lengthwise (tilt): 0 = back, 180 = front
    t_rvo_angle(5, 90)   # bottom servo, crosswise (pan): 0 = left, 180 = right
    while 1:
        # 1. image processing
        img, contours = image_processing()
        # 2. get coordinates
        x, y = get_coord(img, contours)
        # 3. move
        move(x, y)
        # waitKey is required for the imshow windows (and camera preview) to refresh;
        # press 'q' to stop the car, release GPIO, and exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            stop()
            GPIO.cleanup()
            break
检测原理是基于最大轮廓的检测,没有用深度学习的分类,所以容易受到干扰,后期学完深度学习会继续优化。有意见或者想法的朋友欢迎交流。
以上就是opencv物体跟踪树莓派视觉小车实现过程学习的详细内容,更多关于opencv物体跟踪树莓派视觉小车的资料请关注www.887551.com其它相关文章!
本文发布于:2023-04-03 21:57:06,感谢您对本站的认可!
本文链接:https://www.wtabcd.cn/fanwen/zuowen/6e0cce88e93ae2109ffabd99c41fc3ef.html
版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系,我们将在24小时内删除。
本文word下载地址:OpenCV物体跟踪树莓派视觉小车实现过程学习.doc
本文 PDF 下载地址:OpenCV物体跟踪树莓派视觉小车实现过程学习.pdf
留言与评论(共有 0 条评论) |