【UNIHIKER】AI Tracking Camera
【Project Design】
Having already covered how to use the UNIHIKER together with the Micro:bit motor expansion board, we can now use the servo pins on the expansion board to drive a camera pan-tilt gimbal.
Mediapipe detects the face and returns the coordinates of its center; a filtering algorithm keeps those readings from jittering, and a PID algorithm then drives the servos so the camera tracks the face.
【Controlling the Servos】
The microbit_motor.py file can be found in the Pinpong library; upload it to the UNIHIKER and keep it in the same folder as the main script.
# -*- coding: utf-8 -*-
import time
from pinpong.board import Board
from microbit_motor import Microbit_Motor  # import the Microbit_Motor library

Board("microbit").begin()  # initialize; pass the board type and port, or omit the port for auto-detection
#Board("microbit","COM36").begin()                   # Windows: initialize with an explicit port
#Board("microbit","/dev/ttyACM0").begin()            # Linux: initialize with an explicit port
#Board("microbit","/dev/cu.usbmodem14101").begin()   # macOS: initialize with an explicit port
motorbit = Microbit_Motor()

while True:
    # servo pins S1-S8, angle range 0-180
    motorbit.servo(motorbit.S2, 0)
    time.sleep(1)
    motorbit.servo(motorbit.S2, 90)
    time.sleep(1)
    motorbit.servo(motorbit.S2, 180)
    time.sleep(1)
    motorbit.servo(motorbit.S2, 90)
    time.sleep(1)
【Full-Screen Camera View on the UNIHIKER】Open Jupyter Notebook from the UNIHIKER's web interface and run pip install cvzone in its terminal to install cvzone.
import cvzone.FaceDetectionModule as face
import cv2
import numpy as np
import time

def main():
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    cv2.namedWindow('camera', cv2.WND_PROP_FULLSCREEN)  # full-screen window
    cv2.setWindowProperty('camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)  # full-screen window
    detector = face.FaceDetector()
    # For a 320x240 frame the image center is (160, 120)
    while True:
        success, img = cap.read()
        img, bboxs = detector.findFaces(img)
        if bboxs:
            # findFaces returns a list of detections; use the first face
            x, y, w, h = bboxs[0]["bbox"]
            cx, cy = bboxs[0]["center"]
            xVal = cx
            yVal = cy
            cv2.putText(img, f'x:{xVal} , y:{yVal} ', (x, y - 100), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        output_image = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)  # rotate for the UNIHIKER's portrait screen
        cv2.imshow("camera", output_image)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
【Filtering the Face-Center Data】
We use a recursive average filter (also known as the moving or sliding average filter).
Method: treat N consecutive samples as a fixed-length queue of size N, operated first-in, first-out. Each new sample is appended to the tail of the queue and the oldest sample at the head is discarded; the arithmetic mean of the N values currently in the queue is the new filter output. Typical choices of N: flow, N = 12; pressure, N = 4; liquid level, N = 4-12; temperature, N = 1-4.
Advantages: good suppression of periodic interference and high smoothness, well suited to systems with high-frequency oscillation.
Disadvantages: low sensitivity; poor suppression of occasional impulse interference, so it cannot easily remove the sample deviation such pulses cause; unsuitable where impulse interference is severe; relatively wasteful of RAM.
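As a standalone illustration (separate from the full script below), here is a minimal sketch of this filter. The window length N = 5 and the sample values are made up for the example; the script that follows applies the same pop/append pattern to the face-center coordinates.

import numpy as np

# Recursive (sliding) average filter: keep the last N samples in a fixed-length
# queue, drop the oldest one when a new sample arrives, and output the mean.
N = 5
window = [0] * N          # fixed-length queue, oldest sample at index 0
                          # (the first outputs are pulled low until the window fills)

def filter_step(sample):
    window.pop(0)         # first in, first out: discard the oldest sample
    window.append(sample) # append the newest sample at the tail
    return int(np.mean(window))

# Example: a noisy face-center x coordinate gets smoothed
for raw in (160, 158, 191, 161, 159, 162):
    print(raw, filter_step(raw))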
import cvzone.FaceDetectionModule as face
import cv2
import numpy as np
import time

def main():
    cap = cv2.VideoCapture(0)
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    #cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    #cv2.namedWindow('camera', cv2.WND_PROP_FULLSCREEN)  # full-screen window
    #cv2.setWindowProperty('camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)  # full-screen window
    detector = face.FaceDetector()
    s = [0] * 5  # sliding-average window for the face-center x value (5 samples here; adjust as needed)
    while True:
        PTime = time.time()
        success, img = cap.read()
        img, bboxs = detector.findFaces(img)
        if bboxs:
            x, y, w, h = bboxs[0]["bbox"]
            cx, cy = bboxs[0]["center"]
            xVal = cx
            yVal = cy
            cv2.putText(img, f'x:{xVal} , y:{yVal} ', (x, y - 100), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
            #output_image = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
            s.pop(0)                # drop the oldest sample
            s.append(cx)            # push the newest sample
            mean = int(np.mean(s))  # filtered x value
            cv2.putText(img, f'x:{mean} , y:{yVal} ', (x, y - 50), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        fps = 1 / (time.time() - PTime)
        cv2.putText(img, f'FPS: {int(fps)}', (20, 50), cv2.FONT_HERSHEY_PLAIN,
                    3, (255, 255, 0), 3)
        cv2.imshow("camera", img)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
【PID Control of the Horizontal (Pan) Servo】
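Before the full tracking script, here is a minimal, self-contained sketch of the position-form PID update it relies on: P acts on the current error between the measured face-center x and the frame center, I on the accumulated error, and D on the error's rate of change. The gains kp, ki, kd and the 160-pixel target below are illustrative placeholders rather than tuned values; the script that follows keeps the same state in module-level globals and a pidVals list.

import time

# Position-form PID sketch (illustrative gains; real values must be tuned on the gimbal)
kp, ki, kd = 0.05, 0.0, 0.001
target = 160                   # frame-center x for a 320-pixel-wide image
integral = 0.0
prev_error, prev_time = None, time.time()

def pid(current):
    """Return an angle correction for a measured face-center x value."""
    global integral, prev_error, prev_time
    now = time.time()
    dt = now - prev_time
    error = target - current                     # positive when the face is left of center
    integral += error * dt                       # I term: accumulated error over time
    # D term: rate of change of the error (skipped on the very first call)
    derivative = (error - prev_error) / dt if prev_error is not None and dt > 0 else 0.0
    prev_error, prev_time = error, now
    return kp * error + ki * integral + kd * derivative

# The face starts right of center and drifts back; the printed corrections fade toward zero
for measured in (200, 190, 175, 165, 160):
    print(round(pid(measured), 2))
    time.sleep(0.05)                             # stand-in for the time between camera frames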
import cvzone.FaceDetectionModule as face
import cv2
import numpy as np
import time
from pinpong.board import Board
from microbit_motor import Microbit_Motor  # import the Microbit_Motor library

Board("microbit").begin()
motorbit = Microbit_Motor()

pidVals = [0.05, 0, 0.01]  # [Kp, Ki, Kd] gains; example values, tune on the actual gimbal
targetVal = 0
pError = 0
pTime = 0
I = 0
jd_x = 90  # current pan-servo angle

def pid(cVal):
    global I, pTime, pError
    # error = current value - target value
    t = time.time() - pTime
    error = cVal - targetVal
    P = pidVals[0] * error
    I = I + (pidVals[1] * error * t)
    D = (pidVals[2] * (error - pError)) / t
    result = P + I + D
    pError = error
    pTime = time.time()
    return result

def main():
    global pTime, jd_x
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    cv2.namedWindow('camera', cv2.WND_PROP_FULLSCREEN)  # full-screen window
    cv2.setWindowProperty('camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)  # full-screen window
    detector = face.FaceDetector()
    s = [160] * 5  # sliding-average window, started at the frame-center x (160)
    pTime = time.time()
    motorbit.servo(motorbit.S2, jd_x)
    pre_jd_x = 0
    while True:
        PTime = time.time()
        success, img = cap.read()
        img, bboxs = detector.findFaces(img)
        if bboxs:
            x, y, w, h = bboxs[0]["bbox"]
            cx, cy = bboxs[0]["center"]
            xVal = cx
            yVal = cy
            cv2.putText(img, f'x:{xVal} , y:{yVal} ', (x, y - 100), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
            s.pop(0)
            s.append(cx)
            mean = int(np.mean(s))              # filtered face-center x
            jd_x = jd_x + int(pid(160 - mean))  # pixel error -> angle correction
            if jd_x < 1:
                jd_x = 1
            if jd_x > 179:
                jd_x = 179
            print(jd_x)
            if pre_jd_x != jd_x:                # only move the servo when the angle changes
                pre_jd_x = jd_x
                motorbit.servo(motorbit.S2, jd_x)
            cv2.putText(img, f'x:{mean} , y:{yVal} ', (x, y - 50), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        fps = 1 / (time.time() - PTime)
        cv2.putText(img, f'FPS: {int(fps)}', (20, 50), cv2.FONT_HERSHEY_PLAIN,
                    3, (255, 255, 0), 3)
        output_image = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
        cv2.imshow("camera", output_image)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
【Controlling the Pan Servo with a PID Class】
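The PID logic is the same as before, but wrapping it in a class keeps each controller's state (gains, target, previous error, integral, previous timestamp) inside its own instance, so a second controller can later be created for the tilt servo without duplicating code.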
import cvzone.FaceDetectionModule as face
import cv2
import numpy as np
import time
from pinpong.board import Board
from microbit_motor import Microbit_Motor  # import the Microbit_Motor library

Board("microbit").begin()
motorbit = Microbit_Motor()

class PID:
    def __init__(self, pidVals, targetVal, limit=None):
        self.pidVals = pidVals      # [Kp, Ki, Kd]
        self.targetVal = targetVal  # value the controller tries to hold
        self.pError = 0
        self.limit = limit          # [min, max] clamp on the output
        self.I = 0
        self.pTime = time.time()

    def update(self, cVal):
        # error = current value - target value
        t = time.time() - self.pTime
        error = cVal - self.targetVal
        P = self.pidVals[0] * error
        self.I = self.I + (self.pidVals[1] * error * t)
        D = (self.pidVals[2] * (error - self.pError)) / t
        result = P + self.I + D
        if self.limit is not None:
            result = float(np.clip(result, self.limit[0], self.limit[1]))
        self.pError = error
        self.pTime = time.time()
        return result

def main():
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    cv2.namedWindow('camera', cv2.WND_PROP_FULLSCREEN)  # full-screen window
    cv2.setWindowProperty('camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)  # full-screen window
    detector = face.FaceDetector()
    s_x = [160] * 5  # sliding-average window, started at the frame-center x (160)
    jd_x = 90        # current pan-servo angle
    pre_jd_x = 0
    motorbit.servo(motorbit.S2, jd_x)
    xPID = PID([0.05, 0, 0.01], 320 // 2, limit=[-90, 90])  # gains are example values, tune as needed
    while True:
        PTime = time.time()
        success, img = cap.read()
        img, bboxs = detector.findFaces(img)
        if bboxs:
            x, y, w, h = bboxs[0]["bbox"]
            cx, cy = bboxs[0]["center"]
            xVal = cx
            yVal = cy
            cv2.putText(img, f'x:{xVal} , y:{yVal} ', (x, y - 100), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
            s_x.pop(0)
            s_x.append(cx)
            mean_x = int(np.mean(s_x))            # filtered face-center x
            xVal = int(xPID.update(mean_x))       # PID correction in degrees
            jd_x = max(1, min(179, jd_x - xVal))  # keep within the servo's range
            print(jd_x)
            if pre_jd_x != jd_x:                  # only move the servo when the angle changes
                pre_jd_x = jd_x
                motorbit.servo(motorbit.S2, jd_x)
            cv2.putText(img, f'x:{mean_x} , y:{yVal} ', (x, y - 50), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        fps = 1 / (time.time() - PTime)
        cv2.putText(img, f'FPS: {int(fps)}', (20, 50), cv2.FONT_HERSHEY_PLAIN,
                    3, (255, 255, 0), 3)
        output_image = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
        cv2.imshow("camera", output_image)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
【Controlling the Pan and Tilt Servos】
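The complete script combines everything above: a second sliding window and a second PID instance (yPID, targeting the vertical frame center of 120) drive the tilt servo on pin S1, while the pan servo stays on S2.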
import cvzone.FaceDetectionModule as face
import cv2
import numpy as np
import time
from pinpong.board import Board
from microbit_motor import Microbit_Motor  # import the Microbit_Motor library

Board("microbit").begin()
motorbit = Microbit_Motor()

class PID:
    def __init__(self, pidVals, targetVal, limit=None):
        self.pidVals = pidVals      # [Kp, Ki, Kd]
        self.targetVal = targetVal  # value the controller tries to hold
        self.pError = 0
        self.limit = limit          # [min, max] clamp on the output
        self.I = 0
        self.pTime = time.time()

    def update(self, cVal):
        # error = current value - target value
        t = time.time() - self.pTime
        error = cVal - self.targetVal
        P = self.pidVals[0] * error
        self.I = self.I + (self.pidVals[1] * error * t)
        D = (self.pidVals[2] * (error - self.pError)) / t
        result = P + self.I + D
        if self.limit is not None:
            result = float(np.clip(result, self.limit[0], self.limit[1]))
        self.pError = error
        self.pTime = time.time()
        return result

def main():
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    cv2.namedWindow('camera', cv2.WND_PROP_FULLSCREEN)  # full-screen window
    cv2.setWindowProperty('camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)  # full-screen window
    detector = face.FaceDetector()
    s_x = [160] * 5  # sliding-average window for x, started at the frame center
    s_y = [120] * 5  # sliding-average window for y, started at the frame center
    jd_x = 90        # current pan-servo angle (S2)
    jd_y = 135       # current tilt-servo angle (S1)
    pre_jd_x = 0
    pre_jd_y = 0
    motorbit.servo(motorbit.S2, jd_x)
    motorbit.servo(motorbit.S1, jd_y)
    xPID = PID([0.05, 0, 0.01], 320 // 2, limit=[-90, 90])  # gains are example values, tune as needed
    yPID = PID([0.05, 0, 0.01], 240 // 2, limit=[-45, 45])
    while True:
        PTime = time.time()
        success, img = cap.read()
        img, bboxs = detector.findFaces(img)
        if bboxs:
            x, y, w, h = bboxs[0]["bbox"]
            cx, cy = bboxs[0]["center"]
            xVal = cx
            yVal = cy
            cv2.putText(img, f'x:{xVal} , y:{yVal} ', (x, y - 100), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
            s_x.pop(0)
            s_y.pop(0)
            s_x.append(cx)
            s_y.append(cy)
            mean_x = int(np.mean(s_x))            # filtered face-center x
            mean_y = int(np.mean(s_y))            # filtered face-center y
            xVal = int(xPID.update(mean_x))       # pan correction in degrees
            yVal = int(yPID.update(mean_y))       # tilt correction in degrees
            jd_x = max(1, min(179, jd_x - xVal))  # keep within the servos' range
            jd_y = max(1, min(179, jd_y - yVal))
            print(jd_x, jd_y)
            if pre_jd_x != jd_x:                  # only move a servo when its angle changes
                pre_jd_x = jd_x
                motorbit.servo(motorbit.S2, jd_x)
            if pre_jd_y != jd_y:
                pre_jd_y = jd_y
                motorbit.servo(motorbit.S1, jd_y)
            cv2.putText(img, f'x:{mean_x} , y:{mean_y} ', (x, y - 50), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        fps = 1 / (time.time() - PTime)
        cv2.putText(img, f'FPS: {int(fps)}', (20, 50), cv2.FONT_HERSHEY_PLAIN,
                    3, (255, 255, 0), 3)
        output_image = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
        cv2.imshow("camera", output_image)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
【Demo Video】
https://www.bilibili.com/video/BV1PY4y1L7K9?share_source=copy_web
Tried it out — connecting the UNIHIKER to the micro:bit expansion board works really well! Learned a lot, thanks!
It can get the X and Y coordinates; can it also get the Z-axis angle, or the horizontal angle of the eyes, that is, how far the head is tilted to the side?
陈德新 cdx777777
HuskyLens (二哈识图) is a great tool for this sort of thing.
Looks good! I'm going to give it a try too.