【智控万物】控制海康摄像头(续)
以下视频为例5的演示(控制海康摄像头)https://v.youku.com/v_show/id_XNDY1ODk0MzMxNg==.html
1、Python,语音识别
# -*- coding: utf-8 -*-
# Date : 2018-12-02 19:04:55
import wave
import requests
import time
import base64
from pyaudio import PyAudio, paInt16
import webbrowser
framerate = 16000# sampling rate (Hz)
num_samples = 2000# frames read from the stream per chunk
channels = 1# mono
sampwidth = 2# sample width: 2 bytes (16-bit PCM)
FILEPATH = 'speech.wav'
# Baidu OAuth token endpoint; client credentials go in the query string.
base_url = "https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
# NOTE(review): API credentials are hard-coded here (and now public in this
# post) -- they should be revoked and loaded from config/environment instead.
APIKey = "PK36Zo9eVbcDzAV2esTc5Zpc"
SecretKey = "tnxmdSycvbrz6yRveYdRGOZE60nNkMLO"
HOST = base_url % (APIKey, SecretKey)
def getToken(host):
    """Request a Baidu OAuth access token.

    host: full token-endpoint URL with client_id/client_secret query args.
    Returns the access_token string.  Raises for HTTP errors or a missing
    token so an auth failure reads as a clear error, not a bare KeyError.
    """
    res = requests.post(host)
    res.raise_for_status()  # fail fast on HTTP-level errors
    payload = res.json()
    try:
        return payload['access_token']
    except KeyError:
        # Baidu returns {'error': ..., 'error_description': ...} on failure.
        raise RuntimeError('token request failed: %s' % payload)
def save_wave_file(filepath, data):
    """Write recorded PCM frames to *filepath* as a WAV file.

    data: iterable of raw frame byte-strings (as collected by my_record).
    Uses the module-level channels / sampwidth / framerate settings.
    """
    # 'with' guarantees the file is closed even if a write fails
    # (the original leaked the handle on error).
    with wave.open(filepath, 'wb') as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(sampwidth)
        wf.setframerate(framerate)
        wf.writeframes(b''.join(data))
def my_record():
    """Record ~4 seconds of 16 kHz mono audio and save it to FILEPATH."""
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=channels,
                     rate=framerate, input=True,
                     frames_per_buffer=num_samples)
    frames = []
    deadline = time.time() + 4  # record for four seconds
    print('正在录音...')
    try:
        while time.time() < deadline:
            frames.append(stream.read(num_samples))
    finally:
        # The original never stopped the stream or terminated PyAudio,
        # leaking the audio device on any error; always release it.
        stream.stop_stream()
        stream.close()
        pa.terminate()
    print('录音结束.')
    save_wave_file(FILEPATH, frames)
def get_audio(file):
    """Return the raw bytes of *file* (the recorded WAV)."""
    with open(file, 'rb') as handle:
        return handle.read()
def speech2text(speech_data, token, dev_pid=1537):
    """Send raw WAV bytes to Baidu's ASR endpoint and return the transcript.

    speech_data: raw WAV file bytes (16 kHz mono expected).
    token: OAuth access token from getToken().
    dev_pid: language model id (1537 = Mandarin with punctuation).
    Returns the list under 'result' on success, otherwise the full JSON
    error payload so the caller can inspect error_no / error_msg.
    """
    payload = {
        'format': 'wav',
        'rate': '16000',
        'channel': 1,
        'cuid': '*******',
        'len': len(speech_data),
        # The API wants base64 text, not bytes.
        'speech': base64.b64encode(speech_data).decode('utf-8'),
        'token': token,
        'dev_pid': dev_pid,
    }
    print('正在识别...')
    resp = requests.post('https://vop.baidu.com/server_api',
                         json=payload,
                         headers={'Content-Type': 'application/json'})
    body = resp.json()
    # Baidu includes a 'result' key only on success.
    return body.get('result', body)
def openbrowser(text):
    """Open the site matching the recognized *text*, else a Baidu search."""
    sites = (
        (('百度', 'baidu'), 'https://www.baidu.com'),
        (('腾讯', 'tengxun'), 'https://www.qq.com'),
        (('网易', 'wangyi'), 'https://www.163.com/'),
    )
    for aliases, site_url in sites:
        if text in aliases:
            webbrowser.open_new_tab(site_url)
            return
    # Unrecognized command: fall back to searching for the spoken text.
    webbrowser.open_new_tab('https://www.baidu.com/s?wd=%s' % text)
if __name__ == '__main__':
    # dev_pid choices -- 1536: Mandarin (simple English), 1537: Mandarin
    # (punctuated), 1737: English, 1637: Cantonese, 1837: Sichuanese.
    devpid = '1536'
    my_record()
    TOKEN = getToken(HOST)
    speech = get_audio(FILEPATH)
    result = speech2text(speech, TOKEN, int(devpid))
    print(result)
    # The original commented out the body of this 'if', leaving a statement
    # with no suite (a SyntaxError).  On success Baidu returns a list of
    # candidate strings; act on the first one, as the commented code intended.
    if isinstance(result, list) and result:
        openbrowser(result[0].strip(','))
2、Python发QQ电子邮件
import smtplib
from email.header import Header              # kept from original (unused here)
from email.mime.text import MIMEText         # simple text / HTML body parts
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart

# Sender and recipient (a self-addressed alert mail).
sender = 'hbzlzx@qq.com'
receiver = 'hbzlzx@qq.com'
# SMTP server used to send the mail.
smtpServer = 'smtp.qq.com'
# Account name and authorization code (NOT the mailbox login password).
username = 'hbzlzx@qq.com'
password = '***********************'

mail_title = '监控自动报警'
mail_body = '这里是邮件的正文'
mail_msg='''
<p>监控截图如下:</p>
<p><img src="cid:image1"></p>
'''

# Multipart container: HTML body + inline image + attachment.
msg = MIMEMultipart()
msg['Subject'] = mail_title   # subject line
msg['From'] = sender          # displayed sender
msg['To'] = receiver          # displayed recipient
msg.attach(MIMEText(mail_msg, 'html', 'utf-8'))

# Read the snapshot once and reuse the bytes; the original opened the same
# file twice and never closed the second handle.
with open(r'.\img\image0.jpg', 'rb') as fp:
    img_bytes = fp.read()

# Inline copy, referenced from the HTML body via its Content-ID.
msgImage = MIMEImage(img_bytes)
msgImage.add_header('Content-ID', '<image1>')
msg.attach(msgImage)

# Same picture again as a downloadable attachment.
image = MIMEImage(img_bytes, _subtype='octet-stream')
image.add_header('Content-Disposition', 'attachment', filename='img.jpg')
msg.attach(image)

try:
    smtp = smtplib.SMTP()        # create the connection object
    smtp.connect(smtpServer)     # plain SMTP; QQ also offers SSL on port 465
    smtp.login(username, password)
    smtp.sendmail(sender, receiver, msg.as_string())
    print("邮件发送成功!!!")
    smtp.quit()
except smtplib.SMTPException:
    print("邮件发送失败!!!")
4、百度人脸注册1(Token)。
(注:原文此处残留一串字符 "ijvylcjwmezscadh",疑似误贴出的QQ邮箱SMTP授权码——应立即在邮箱设置中作废该授权码,并勿在公开帖子中粘贴。)
# -*- coding: utf-8 -*-
# Re-register (update) a stored face for an existing Baidu face-库 user.
from aip import AipFace

APP_ID = '15174864'
API_KEY = 'z4iPlvW4VLKMwu4KG0Z3jUMq'
SECRET_KEY = 'ghQO3vHmxsdCQswFx6DvGMGjbhgAoNXI'
client = AipFace(APP_ID, API_KEY, SECRET_KEY)

imageType = 'FACE_TOKEN'   # we update using an already-stored face token
userId = "s230183198109263222"
groupId = 'mxy'

options = {}
# The original wrapped this in str(...encode()), which stores the literal
# text "b'...'" as user_info; pass the plain string instead.
options["user_info"] = "宋秀cc,ge7,232102198008113018,sxs,15030352887"

data = client.faceGetlist(userId, groupId)
if data["error_msg"] == 'SUCCESS':
    # The original line here was syntactically broken; the intent is the
    # token of the user's most recently registered face (last list entry).
    face_list = data["result"]["face_list"]
    image = face_list[-1]["face_token"]
    print(client.updateUser(image, imageType, groupId, userId, options))
    data = client.getUser(userId, groupId)
    print(data)
else:
    print(data)
5、调用监控摄像头,进行本地人脸检测(班级是否有回头说话,不戴口罩者),并截图保存,文件名有截取时间。
import cv2, time

# Haar cascade face detector.
classfier = cv2.CascadeClassifier("D:\\zaw\\xml\\haarcascade_frontalface_default.xml")

# Bounding box of the largest face seen in the current frame.
max_x = 0
max_y = 0
max_h = 0
max_w = 0
max_area = 0  # renamed from 'max', which shadowed the builtin
color = (255, 0, 155)  # face-rectangle colour
#url = 'rtsp://admin:a12345678@172.20.0.81:554/1'  # office camera
url = 'rtsp://admin:a12345678@172.20.0.32:554/1'
cap = cv2.VideoCapture(url)
path_name = "./img"
num = 1   # snapshot counter
i = 0     # frame counter: run detection only every ~30th frame

while cap.isOpened():
    ret, frame = cap.read()
    try:
        image = cv2.resize(frame, (800, 600), interpolation=cv2.INTER_AREA)
        if i > 30:
            i = 0
            # Grayscale speeds up detection.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # scaleFactor=1.3, minNeighbors=5 (a window must be detected 5
            # times to count as a face), minimum face size 8x8.
            faceRects = classfier.detectMultiScale(
                image, 1.3, 5, cv2.CASCADE_SCALE_IMAGE, (8, 8))
            print(len(faceRects))
            max_area = 0
            if len(faceRects) > 0:
                # Keep only the largest face in the frame.
                for x, y, w, h in faceRects:
                    if w * h > max_area:
                        max_area = w * h
                        max_x, max_y, max_w, max_h = x, y, w, h
                # Snapshot named by capture time (':' is illegal in file
                # names on Windows, so replace it).
                now = time.localtime()
                img_name = "%s/%s.jpg" % (
                    path_name,
                    time.strftime("%Y-%m-%d %H:%M:%S", now).replace(":", "-"))
                print(img_name)
                # Saved before the rectangle is drawn (as in the original),
                # so the stored image is unannotated.
                cv2.imwrite(img_name, image)
                num += 1
                cv2.rectangle(image, (max_x, max_y),
                              (max_x + max_w, max_y + max_h), color, 2)
        else:
            print(i)
            i = i + 1
        cv2.imshow('frame', image)
    except Exception:
        # RTSP streams drop regularly; release the dead capture (the
        # original leaked it) and reconnect instead of crashing.
        print("False")
        cap.release()
        cap = cv2.VideoCapture(url)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture.
cap.release()
cv2.destroyAllWindows()
6、先本地检测出人脸,再送百度云进行人脸比对
import cv2,time
from aip import AipFace
import base64
from PIL import Image
from io import BytesIO
def frame2base64(frame):
    """Encode an image array (one video frame) as base64 JPEG bytes."""
    buffer = BytesIO()
    # PIL performs the JPEG compression entirely in memory.
    Image.fromarray(frame).save(buffer, format='JPEG')
    return base64.b64encode(buffer.getvalue())
""" 你的 APPID AK SK """
APP_ID = '15174864'
API_KEY = 'z4iPlvW4VLKMwu4KG0Z3jUMq'
SECRET_KEY = 'ghQO3vHmxsdCQswFx6DvGMGjbhgAoNXI'
client = AipFace(APP_ID, API_KEY, SECRET_KEY)

# Local Haar cascade pre-filter; only frames with a face go to the cloud.
classfier = cv2.CascadeClassifier("D:\\zaw\\xml\\haarcascade_frontalface_default.xml")
max_x = 0
max_y = 0
max_h = 0
max_w = 0
max_area = 0  # renamed from 'max' (shadowed the builtin)
color = (255, 0, 155)  # face-rectangle colour
url = 'rtsp://admin:a12345678@172.20.0.81:554/1'
cap = cv2.VideoCapture(url)
path_name = "./img"
num = 1   # snapshot counter
i = 0     # frame counter: run detection only every ~30th frame

while cap.isOpened():
    ret, frame = cap.read()
    try:
        image = cv2.resize(frame, (800, 600), interpolation=cv2.INTER_AREA)
        if i > 30:
            i = 0
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faceRects = classfier.detectMultiScale(
                image, 1.3, 2, cv2.CASCADE_SCALE_IMAGE, (32, 32))
            print(len(faceRects))
            max_area = 0
            if len(faceRects) > 0:
                # Keep only the largest face in the frame.
                for x, y, w, h in faceRects:
                    if w * h > max_area:
                        max_area = w * h
                        max_x, max_y, max_w, max_h = x, y, w, h
                img_name = "%s/%d.jpg" % (path_name, num)
                print(img_name)
                # BUG in original: it base64-encoded the frame first and then
                # passed the resulting *string* to cv2.imwrite, which fails.
                # Save the image array first, then encode for the API.
                cv2.imwrite(img_name, image)
                b64 = str(frame2base64(image), encoding='utf-8')  # API rejects raw bytes
                options = {
                    'quality_control': 'NORMAL',
                    'liveness_control': 'LOW',
                    'max_user_num': 1,
                }
                # Search the face against both registered groups.
                data = client.search(b64, "BASE64", "sxs,mxy", options)
                print(data)
                num += 1
                cv2.rectangle(image, (max_x, max_y),
                              (max_x + max_w, max_y + max_h), color, 2)
        else:
            print(i)
            i = i + 1
        cv2.imshow('frame', image)
    except Exception:
        # Re-enabled from the commented-out original: reconnect on a
        # dropped RTSP stream instead of crashing.
        print("False")
        cap.release()
        cap = cv2.VideoCapture(url)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture.
cap.release()
cv2.destroyAllWindows()
7、人脸识别加蓝牙指令发送
# Section 7: full-body detection driving Bluetooth serial commands.
from aip import AipBodyAnalysis
import pyttsx3,cv2
import numpy as np
import serial #pyserial: Bluetooth virtual COM port
import sys,time
import pyttsx3
engine = pyttsx3.init()#initialize the text-to-speech engine
""" 你的 APPID AK SK """
APP_ID = '14333301'
API_KEY = '0OoR1G35nUEKt1E2xagNcr6l'
SECRET_KEY = 'kUoHSSsbMuRcuBT2xWMdWXwXv57rnxW5'
client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)#Baidu body-analysis client
#ser=serial.Serial("com4",9600,timeout=0.5)#Windows: open COM4 for the Bluetooth link
# NOTE(review): 'ser' is left commented out above, yet the loop below still
# calls ser.isOpen()/ser.write() -- it will raise NameError as soon as no
# target is detected.  Re-enable the line above (or guard the serial code).
cv2.namedWindow("face")#create the preview window
cap=cv2.VideoCapture(1)#open camera #1
success, frame = cap.read()#read one frame: (success flag, image)
color = (55,255,155)#colour of the detection rectangle
classfier=cv2.CascadeClassifier("D:\\zaw\\xml\\haarcascade_fullbody.xml")#full-body Haar cascade
# Bounding box / score of the largest detection in the current frame.
max_x=0
max_y=0
max_h=0
max_w=0
max=0
def get_file_content(filePath):
    """Return the binary contents of *filePath*."""
    with open(filePath, 'rb') as handle:
        return handle.read()
engine.say('开始识别')
engine.runAndWait()

# The serial port may be left un-opened (its setup line above is commented
# out in the original, which made the 'no target' branch raise NameError);
# treat that case as "no Bluetooth link" instead of crashing.
ser = globals().get('ser')

while 1:
    success, frame = cap.read()
    size = frame.shape[:2]  # (height, width) of the frame
    # Grayscale + histogram equalization speed up / stabilise detection.
    # (The original also pre-allocated an np.zeros image that was
    # immediately overwritten -- dead code, dropped.)
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.equalizeHist(image, image)
    # Ignore detections smaller than 1/32 of the frame in each dimension.
    divisor = 32
    h, w = size
    minSize = (int(w / divisor), int(h / divisor))
    faceRects = classfier.detectMultiScale(image, 1.3, 2,
                                           cv2.CASCADE_SCALE_IMAGE, minSize)
    max_score = 0  # was 'max', which shadowed the builtin
    if len(faceRects) > 0:
        # Track the detection with the largest x*y product (as the
        # original did -- note this is position, not area).
        for x, y, w, h in faceRects:
            if x * y > max_score:
                max_score = x * y
                max_x, max_y, max_w, max_h = x, y, w, h
        print(x, y)
        cv2.rectangle(frame, (max_x, max_y),
                      (max_x + max_w, max_y + max_h), color, 6)
        # (The original had an `if(0):` dead block here that would have
        # written the x coordinate to the serial port; it never executed
        # and has been removed.)
    else:
        print('没有识别到目标,停止')
        if ser is not None:
            if ser.isOpen() == False:
                ser.open()
            if ser.isOpen() == False:
                break
            try:
                # pyserial needs bytes, not str (original passed '0').
                ser.write(b'0')  # command: stop
                time.sleep(0.1)  # give the link time to drain
            except Exception:
                ser.close()
                print('退出3')
                time.sleep(1)
                sys.exit()
    cv2.imshow('face', frame)  # show the annotated frame
    key = cv2.waitKey(1)
    c = chr(key & 255)
    if c in ['q', 'Q', chr(27)]:  # q / Q / Esc quits
        break

cap.release()
cv2.destroyWindow('face')
8、手势识别加蓝牙指令发送
# Section 8: gesture recognition driving Bluetooth serial commands.
from aip import AipBodyAnalysis
import pyttsx3,cv2
import numpy as np
import serial #pyserial: Bluetooth virtual COM port
import sys,time
import pyttsx3
engine = pyttsx3.init()#initialize the text-to-speech engine
""" 你的 APPID AK SK """
APP_ID = '14333301'
API_KEY = '0OoR1G35nUEKt1E2xagNcr6l'
SECRET_KEY = 'kUoHSSsbMuRcuBT2xWMdWXwXv57rnxW5'
client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)#Baidu gesture-recognition client
ser=serial.Serial("com4",9600,timeout=0.5)#Windows: open COM4 for the Bluetooth link
cv2.namedWindow("face")#create the preview window
cap=cv2.VideoCapture(1)#open camera #1
success, frame = cap.read()#read one frame: (success flag, image)
color = (55,255,155)#colour of the detection rectangle
classfier=cv2.CascadeClassifier("D:\\zaw\\xml\\haarcascade_frontalface_alt2.xml")#frontal-face Haar cascade
# Bounding box / score of the largest detection in the current frame.
max_x=0
max_y=0
max_h=0
max_w=0
max=0
def get_file_content(filePath):
    """Read *filePath* and return its bytes (snapshot sent to the API)."""
    with open(filePath, 'rb') as src:
        data = src.read()
    return data
engine.say('开始识别')
engine.runAndWait()

# Spoken/printed label for every gesture class Baidu may return.
# Replaces the original ~100-line if/elif chain with lookup tables.
GESTURE_LABELS = {
    'Point': 'one', 'Two': 'Two', 'Three': 'Three', 'Four': 'Four',
    'Six': 'Six', 'Seven': 'Seven', 'Eight': 'Eight', 'Nine': 'Nine',
    'Rock': 'Rock',
    'Heart_3': '双手比心', 'Heart_1': '双手比心', 'Heart_2': '双手比心',
    'Palm_up': '掌心向上', 'ILY': '我爱你',
    'Thumb_down': 'Diss', 'Thumb_up': '点赞',
    'Heart_single': '单手比心', 'Honour': '作别',
    'Congratulation': '作揖', 'Prayer': '祈祷',
    'Ok': 'OK', 'Fist': '拳头', 'Five': '掌心向前',
}

# Gestures that drive the robot: class -> (serial byte, printed, spoken).
SERIAL_COMMANDS = {
    'Six':   (b'L', '左转', '左转'),   # turn left
    'Eight': (b'R', '右转', '右转'),   # turn right
    'Ok':    (b'G', '前进', '前进'),   # forward
    'Fist':  (b'B', '后退', '停止'),   # original prints "back" but speaks "stop"
    'Five':  (b'S', '停止', '停止'),   # stop
}

def _send_command(cmd, text, speech):
    """Send one command byte over the Bluetooth link and announce it.

    Returns False if the port cannot be (re)opened; exits the program on a
    write failure, matching the original's behaviour.
    """
    if ser.isOpen() == False:
        ser.open()
    if ser.isOpen() == False:
        return False
    try:
        ser.write(cmd)
        print(text)
        time.sleep(0.1)          # give the link time to drain
        engine.say(speech)
        engine.runAndWait()
    except Exception:
        ser.close()
        print('退出')
        time.sleep(1)
        sys.exit()
    return True

while 1:
    success, frame = cap.read()
    size = frame.shape[:2]                    # (height, width)
    # Grayscale + histogram equalization speed up / stabilise detection.
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.equalizeHist(image, image)
    # Ignore detections smaller than 1/32 of the frame per dimension.
    divisor = 32
    h, w = size
    print(h, w)
    minSize = (int(w / divisor), int(h / divisor))
    faceRects = classfier.detectMultiScale(image, 1.3, 2,
                                           cv2.CASCADE_SCALE_IMAGE, minSize)
    print(len(faceRects))
    max_score = 0                             # was 'max' (shadowed builtin)
    if len(faceRects) > 0:
        # Track the detection with the largest x*y product (as original).
        for x, y, w, h in faceRects:
            if x * y > max_score:
                max_score = x * y
                max_x, max_y, max_w, max_h = x, y, w, h
        # Snapshot the frame and send it to Baidu's gesture API.
        cv2.imwrite('.\\img\\image0.jpg', frame)
        image = get_file_content('.\\img\\image0.jpg')
        cv2.rectangle(frame, (max_x, max_y),
                      (max_x + max_w, max_y + max_h), color, 2)
        data = client.gesture(image)
        print(data)
        if data['result_num'] > 0:
            for s in data['result']:
                classname = s['classname']
                if classname == 'Face':
                    say = '只识别出人脸,没有手势'
                elif classname == 'other':
                    say = '没有人脸,没有手势,请重新拍照'
                elif classname in GESTURE_LABELS:
                    say = GESTURE_LABELS[classname]
                    if classname in SERIAL_COMMANDS:
                        if not _send_command(*SERIAL_COMMANDS[classname]):
                            break
                    # First recognized gesture wins.  (The original broke
                    # on almost every branch; 'Two' missing its break
                    # looked like an oversight and is normalised here.
                    # NOTE(review): 'say' was computed but never printed
                    # or spoken for non-serial gestures -- kept as-is.)
                    break
        else:
            print('没有人脸,没有手势,请重新拍照')
            engine.say('没有人脸,没有手势,请重新拍照')
            engine.runAndWait()
    cv2.imshow('face', frame)                 # show the annotated frame
    key = cv2.waitKey(1)
    c = chr(key & 255)
    if c in ['q', 'Q', chr(27)]:              # q / Q / Esc quits
        break

cap.release()
cv2.destroyWindow('face')
9、Python显示窗口
import tkinter as tk

# Minimal tkinter demo: a label plus a button that rewrites the label.

def btnHelloClicked():
    """Button callback: replace the label's text."""
    labelHello.config(text="Hello!")

# Build the window and its widgets.
top = tk.Tk()
top.title("Hello Test")
labelHello = tk.Label(top, text="Hello Tkinter!")
labelHello.pack()
btn = tk.Button(top, text="Hello", command=btnHelloClicked)
btn.pack()
# Hand control to the event loop; blocks until the window closes.
top.mainloop()
可以试试这个库:https://github.com/kootenpv/yagmail
pATAq 发表于 2020-6-30 22:08:
可以试试这个库:https://github.com/kootenpv/yagmail
发送邮件代码更简洁一些
不错,收藏!
页:
[1]