[Interface] PyQt5 and Swin Transformer for face recognition
2022-06-26 22:56:00 【Hard working yuan】
Based on Python, this project uses the PyQt5 module together with the Swin Transformer detection algorithm to recognize faces.
Tools
Language: Python
Main library: PyQt5
Test model: Swin Transformer
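The GUI below keeps the interface responsive by running camera capture and detection in worker threads (QThread) and pushing frames back to the main thread through pyqtSignal. Here is a minimal, self-contained sketch of that pattern; the class and widget names (FrameWorker, frame_ready) are illustrative, not taken from the project.
import sys
import numpy as np
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication, QLabel
from PyQt5.QtGui import QImage, QPixmap

class FrameWorker(QThread):                      # illustrative worker thread
    frame_ready = pyqtSignal(object)             # signal must be a class attribute

    def run(self):
        for _ in range(100):                     # stand-in for a capture loop
            frame = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)
            self.frame_ready.emit(frame)         # hand the frame to the GUI thread
            self.msleep(33)                      # roughly 30 fps

def show_frame(label, frame):
    h, w, c = frame.shape
    qimg = QImage(frame.data, w, h, c * w, QImage.Format_RGB888)
    label.setPixmap(QPixmap.fromImage(qimg))

if __name__ == '__main__':
    app = QApplication(sys.argv)
    label = QLabel()
    label.show()
    worker = FrameWorker()
    worker.frame_ready.connect(lambda f: show_frame(label, f))
    worker.start()
    sys.exit(app.exec_())
The project's code follows the same structure: Thread1 streams raw frames to the preview label, Thread2 streams detected frames to the result labels.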
Core code
import time, cv2
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QFileDialog, QMainWindow
from PyQt5.QtCore import QTimer, QDateTime
from PyQt5.Qt import QThread, pyqtSignal, QMutex
import qimage2ndarray
from queue import Queue
from Project import Ui_Form
from infer import *
video_steam = Queue()  # buffer of detected frames (filled by Thread2)
class Thread1(QThread):  # Thread 1: raw camera preview
    # Custom signal. Signals must be class attributes defined on the class itself;
    # they cannot be created inside instance methods.
    thread1_signal2 = pyqtSignal(object)  # the payload is an object (a numpy frame)

    def __init__(self):
        super(Thread1, self).__init__()
        self.t = 0
        self.Image_thread1 = None
        self.mutex = QMutex()  # thread lock
        self._isPause = False

    def run(self):
        while True:
            self.mutex.lock()  # lock
            time.sleep(0.001)  # yield briefly
            ret, frame = cap.read()
            if not ret:  # skip frames the camera failed to deliver
                self.mutex.unlock()
                continue
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.Image_thread1 = frame
            self.thread1_signal2.emit(self.Image_thread1)  # emit the frame to the GUI
            self.mutex.unlock()  # unlock
class Thread2(QThread):  # Thread 2: detection on camera frames
    thread1_signal3 = pyqtSignal(object)

    def __init__(self):
        super(Thread2, self).__init__()
        self.t = 0
        self.Image_thread1 = None
        self.mutex = QMutex()
        self._isPause = False
        self.video_flag = 0
        # Build the detector once here; re-initializing it for every frame inside
        # the loop below would be far too slow.
        self.config_file = 'mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_adamw_1x_coco.py'
        self.checkpoint_file = 'epoch_100.pth'
        self.classes = ['The color of silica gel is abnormal', 'The color of silica gel is normal',
                        'The door is open', 'The door is close', 'breakage', 'dirt', 'rust',
                        'foreign object', 'oil leakage', 'animal', 'hat', 'person']
        self.model = init_detector(self.config_file, self.checkpoint_file, device='cuda:0')

    def run(self):
        while True:
            self.mutex.lock()
            time.sleep(0.001)
            # time.sleep(1 / self.a)  # change the frame rate here
            ret, frame = cap.read()
            if not ret:
                self.mutex.unlock()
                continue
            frame, result_label = InferResult(frame, self.config_file, self.checkpoint_file,
                                              self.classes, self.model)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if self.video_flag == 0:
                video_steam.put(frame)
            self.Image_thread1 = frame
            self.thread1_signal3.emit(self.Image_thread1)
            self.mutex.unlock()
class PyQtMainEntry(QMainWindow, Ui_Form):
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.mark = 0  # 0: single-image mode, 1: camera mode
        self.btnReadImage.clicked.connect(self.btnReadImage_Clicked)
        self.btnShowCamera.clicked.connect(self.btnOpenCamera_Clicked)
        self.btnStartLabel.clicked.connect(self.startRecognize)
        self.btnSaveResult.clicked.connect(self.resultsave)
        self.time()

    def btnReadImage_Clicked(self):
        # Load an image from disk and show it on the preview label
        self.mark = 0
        filename, _ = QFileDialog.getOpenFileName(self, 'Open')
        if filename:
            self.captured = cv2.imread(str(filename))
            self.captured = cv2.cvtColor(self.captured, cv2.COLOR_BGR2RGB)
            rows, cols, channels = self.captured.shape
            bytesPerLine = channels * cols
            QImg = QImage(self.captured.data, cols, rows, bytesPerLine, QImage.Format_RGB888)
            self.Videolabel.setPixmap(QPixmap.fromImage(QImg).scaled(
                self.Videolabel.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
            self.Videolabel.setScaledContents(True)

    def btnOpenCamera_Clicked(self):
        # Start the preview thread; the detection thread is started by startRecognize
        self.mark = 1
        self.process_thread = Thread2()
        self.process_thread.thread1_signal3.connect(self.thread2_work2)
        self.preview_thread = Thread1()
        self.preview_thread.thread1_signal2.connect(self.thread1_work2)
        self.preview_thread.start()

    def thread1_work2(self, img):
        # Slot for Thread1: display the raw camera frame
        self.Image = img
        qimg = qimage2ndarray.array2qimage(img)
        self.Videolabel.setPixmap(QPixmap(qimg))
        self.Videolabel.show()
        self.Videolabel.setScaledContents(True)

    def thread2_work2(self, img):
        # Slot for Thread2: display the detected frame on both result labels
        self.Image = img
        qimg = qimage2ndarray.array2qimage(img)
        self.DetectImagelabel.setPixmap(QPixmap(qimg))
        self.DetectImagelabel.show()
        self.DetectImagelabel.setScaledContents(True)
        self.Videolabel_2.setPixmap(QPixmap(qimg))
        self.Videolabel_2.show()
        self.Videolabel_2.setScaledContents(True)
    def startRecognize(self):
        if self.mark == 0:
            # Single-image mode: run the detector once on the loaded image
            img = self.captured
            config_file = 'mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_adamw_1x_coco.py'
            checkpoint_file = 'epoch_100.pth'
            classes = ['The color of silica gel is abnormal', 'The color of silica gel is normal',
                       'The door is open', 'The door is close', 'breakage', 'dirt', 'rust',
                       'foreign object', 'oil leakage', 'animal', 'hat', 'person']
            model = init_detector(config_file, checkpoint_file, device='cuda:0')
            draw_1, result_label = InferResult(img, config_file, checkpoint_file, classes, model)
            self.result = draw_1
            draw_2 = qimage2ndarray.array2qimage(draw_1)
            self.DetectImagelabel.setPixmap(QPixmap(draw_2))
            self.DetectImagelabel.setScaledContents(True)
            self.DetectImagelabel.show()
        else:
            # Camera mode: start the detection thread
            self.process_thread.start()

    # Display the current time on a label
    def showCurrentTime(self, timeLabel):
        now = QDateTime.currentDateTime()
        self.timeDisplay = now.toString('yyyy-MM-dd hh:mm:ss dddd')
        timeLabel.setText(self.timeDisplay)

    def time(self):
        self.timer = QTimer()
        self.timer.timeout.connect(lambda: self.showCurrentTime(self.label_2))
        self.timer.start(1000)  # refresh once per second
    def resultsave(self):
        path_filename = QFileDialog.getExistingDirectory(self, 'Save results')
        if path_filename:
            self.saveImage = cv2.cvtColor(self.result, cv2.COLOR_RGB2BGR)
            cv2.imwrite(path_filename + '/' + self.timeDisplay[:10]
                        + '_' + str(10) + '.png', self.saveImage)
            # Show the save path in the line edit
            self.PathLineEdit.setText(path_filename)
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    cap = cv2.VideoCapture(0)  # shared camera handle used by both threads
    window = PyQtMainEntry()
    window.show()
    sys.exit(app.exec_())
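The code above imports InferResult and init_detector from infer, which the post does not include. For orientation only, here is a minimal sketch of what such a helper might look like, assuming it wraps MMDetection's standard init_detector / inference_detector API; the score_thr parameter and the drawing logic are assumptions, not the project's actual implementation.
# infer.py -- a minimal sketch, assuming the project wraps MMDetection's
# standard inference API; the real infer.py is not shown in the post.
from mmdet.apis import init_detector, inference_detector  # re-exported for the GUI

def InferResult(frame, config_file, checkpoint_file, classes, model, score_thr=0.5):
    # Run one BGR frame through the detector
    result = inference_detector(model, frame)
    # Mask R-CNN returns (bbox_results, segm_results); keep the bbox part for labels
    bbox_result = result[0] if isinstance(result, tuple) else result
    result_label = [classes[i] for i, bboxes in enumerate(bbox_result)
                    if len(bboxes) > 0 and bboxes[:, -1].max() >= score_thr]
    # Draw boxes/masks on a copy of the frame without opening a window
    drawn = model.show_result(frame, result, score_thr=score_thr, show=False)
    return drawn, result_label
Because init_detector is expensive, a helper like this should receive an already-initialized model, which is why Thread2 above builds the detector once in __init__ rather than once per frame.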
Running results

Source code