Enhancing the Functionality and Performance of the Face Recognition System - User Interface

Date: 2024-11-19 07:33:00

To make the face recognition system easier to use and configure, we will develop a graphical user interface (GUI). It will give users a friendly way to configure and manage the system.

Implementation Approach

  1. Use PyQt or Tkinter

    • Build the graphical user interface with a library such as PyQt or Tkinter.
    • Provide configuration-file editing, camera selection, and display of recognition results.
  2. Real-time preview

    • Show a live video preview window in the interface that displays the current camera's stream.
    • Support selecting and switching between multiple cameras (a camera-listing helper is sketched in the example code below).
  3. User configuration

    • Provide a settings screen that lets users adjust parameters such as the recognition distance (tolerance) and the detection model.
    • Save the user's settings to a configuration file so they can be loaded on the next startup (a minimal save/load sketch follows this list).
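
As a minimal sketch of the configuration persistence described in item 3, assuming a config.ini file with a [FaceRecognition] section (the same layout the example below reads), the settings could be written back with configparser:

import configparser

def save_config(tolerance, model, config_file='config.ini'):
    # Illustrative helper: persist the user's settings so they can be reloaded
    # on the next startup. The section and option names are assumptions.
    config = configparser.ConfigParser()
    config.read(config_file)
    if not config.has_section('FaceRecognition'):
        config.add_section('FaceRecognition')
    config.set('FaceRecognition', 'tolerance', str(tolerance))
    config.set('FaceRecognition', 'model', model)
    with open(config_file, 'w') as f:
        config.write(f)

The GUI could call such a helper when the user clicks a save button or when the window closes.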

Example Code

import sys
import configparser

import cv2
import face_recognition
import numpy as np
import psycopg2
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QVBoxLayout, QWidget, QLabel, QComboBox, QLineEdit
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QTimer

class FaceRecognitionGUI(QMainWindow):
    def __init__(self, db_config, config_file='config.ini'):
        super().__init__()
        self.db_config = db_config
        # Load recognition settings (tolerance and detection model) from the config file.
        self.config = configparser.ConfigParser()
        self.config.read(config_file)
        self.tolerance = float(self.config.get('FaceRecognition', 'tolerance'))
        self.model = self.config.get('FaceRecognition', 'model')
        # Load all known faces and their per-face tolerances from the database.
        self.known_face_encodings, self.known_face_names, self.tolerances = self.load_faces_from_db()
        self.video_capture = None
        self.initUI()

    def initUI(self):
        self.setWindowTitle('Face Recognition System')
        self.setGeometry(100, 100, 800, 600)

        self.central_widget = QWidget()
        self.setCentralWidget(self.central_widget)

        layout = QVBoxLayout()

        # Live video preview area.
        self.video_label = QLabel(self)
        layout.addWidget(self.video_label)

        # Camera selection. The indices are hardcoded here; they could instead be
        # discovered at runtime (see the list_available_cameras sketch below the class).
        self.camera_combo = QComboBox(self)
        self.camera_combo.addItems(['Camera 0', 'Camera 1'])
        layout.addWidget(self.camera_combo)

        # Global tolerance value, applied when recognition starts.
        self.tolerance_input = QLineEdit(self)
        self.tolerance_input.setText(str(self.tolerance))
        layout.addWidget(self.tolerance_input)

        self.start_button = QPushButton('Start Recognition', self)
        self.start_button.clicked.connect(self.start_real_time_face_recognition)
        layout.addWidget(self.start_button)

        self.central_widget.setLayout(layout)

        # Timer that drives frame grabbing while recognition is running.
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_frame)

    def load_faces_from_db(self):
        """Load known face encodings, names, and per-face tolerances from PostgreSQL."""
        known_face_encodings = []
        known_face_names = []
        tolerances = []

        conn = psycopg2.connect(**self.db_config)
        cursor = conn.cursor()
        cursor.execute("SELECT name, encoding, tolerance FROM faces")
        for name, encoding_str, tolerance in cursor.fetchall():
            # Encodings are stored as space-separated floats; parse them back into
            # a NumPy array (np.fromstring is deprecated for this use).
            encoding = np.array(encoding_str.split(), dtype=float)
            known_face_encodings.append(encoding)
            known_face_names.append(name)
            tolerances.append(tolerance)

        cursor.close()
        conn.close()

        return known_face_encodings, known_face_names, tolerances

    def update_frame(self):
        # Grab the next frame from the camera and run detection/recognition on it.
        ret, frame = self.video_capture.read()
        if not ret:
            return

        # face_recognition expects RGB; cvtColor also yields a contiguous array,
        # which some dlib builds require (a reversed slice does not).
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        face_locations = face_recognition.face_locations(rgb_frame, model=self.model)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            name = "Unknown"
            min_distance = float('inf')

            # Compare against every known face and keep the closest match that
            # falls within that face's tolerance.
            for known_encoding, known_name, known_tolerance in zip(self.known_face_encodings, self.known_face_names, self.tolerances):
                distance = face_recognition.face_distance([known_encoding], face_encoding)[0]
                # Fall back to the global tolerance if the row has no per-face value.
                threshold = known_tolerance if known_tolerance is not None else self.tolerance
                if distance < min_distance and distance <= threshold:
                    min_distance = distance
                    name = known_name

            # Draw a bounding box and a label with the recognized name.
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Convert the annotated BGR frame to RGB and display it in the QLabel.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = frame.shape
        bytes_per_line = ch * w
        q_image = QImage(frame.data, w, h, bytes_per_line, QImage.Format_RGB888)
        self.video_label.setPixmap(QPixmap.fromImage(q_image))

    def start_real_time_face_recognition(self):
        # Apply the tolerance typed in the UI, then open the selected camera.
        self.tolerance = float(self.tolerance_input.text())
        camera_index = int(self.camera_combo.currentText().split()[-1])
        self.video_capture = cv2.VideoCapture(camera_index)
        self.timer.start(30)

    def closeEvent(self, event):
        # Stop the frame timer and release the camera (if one was opened) on exit.
        self.timer.stop()
        if self.video_capture is not None:
            self.video_capture.release()
        event.accept()
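
# A rough sketch of how the hardcoded camera list in initUI could be replaced:
# probe OpenCV camera indices at startup and use the result to populate the
# QComboBox. The maximum index to probe is an arbitrary assumption.
def list_available_cameras(max_index=5):
    """Return the indices of cameras OpenCV can open (a best-effort probe)."""
    available = []
    for index in range(max_index):
        cap = cv2.VideoCapture(index)
        if cap.isOpened():
            available.append(index)
        cap.release()
    return available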

# Example usage
if __name__ == "__main__":
    db_config = {
        'dbname': 'your_dbname',
        'user': 'your_user',
        'password': 'your_password',
        'host': 'localhost',
        'port': '5432'
    }

    app = QApplication(sys.argv)
    face_recognition_gui = FaceRecognitionGUI(db_config)
    face_recognition_gui.show()
    sys.exit(app.exec_())
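
The example assumes a PostgreSQL table named faces with name, encoding, and tolerance columns, where each encoding is stored as space-separated floats. As a sketch of how a face might be enrolled into that assumed schema (the enroll_face helper below is illustrative, not part of the system above):

import face_recognition
import psycopg2

def enroll_face(db_config, name, image_path, tolerance=0.6):
    # Illustrative enrollment helper matching the format load_faces_from_db expects:
    # the 128-dimensional encoding is serialized as space-separated floats.
    image = face_recognition.load_image_file(image_path)
    encodings = face_recognition.face_encodings(image)
    if not encodings:
        raise ValueError(f"No face found in {image_path}")
    encoding_str = ' '.join(str(x) for x in encodings[0])

    conn = psycopg2.connect(**db_config)
    cursor = conn.cursor()
    cursor.execute(
        "INSERT INTO faces (name, encoding, tolerance) VALUES (%s, %s, %s)",
        (name, encoding_str, tolerance),
    )
    conn.commit()
    cursor.close()
    conn.close()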

With these improvements, we hope to further enhance the functionality and performance of the face recognition system and make it better suited to a wide range of real-world applications.