This week, I started working on face recognition. My first task was to write a program on my laptop that could determine whether the faces in two photos belong to the same person.

Here is my code👇

import cv2
import dlib
import numpy as np

# Load Dlib's face detection and face recognition models
detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor('./data/shape_predictor_68_face_landmarks.dat')
face_encoder = dlib.face_recognition_model_v1('./data/dlib_face_recognition_resnet_model_v1.dat')

def get_face_encoding(image_path):
    # Load image (cv2.imread returns None if the path is wrong)
    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"Could not read image: {image_path}")
    # Convert to RGB (Dlib expects RGB images)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Detect faces
    faces = detector(img_rgb)
    
    if len(faces) == 0:
        raise ValueError("No face found in the image.")
        
    # Get the first face's landmarks
    landmarks = shape_predictor(img_rgb, faces[0])
    # Get the face encoding
    encoding = face_encoder.compute_face_descriptor(img_rgb, landmarks)
    
    return np.array(encoding)

def compare_faces(image_path1, image_path2):
    # Get face encodings
    encoding1 = get_face_encoding(image_path1)
    encoding2 = get_face_encoding(image_path2)
    
    # Compute the distance between the encodings
    distance = np.linalg.norm(encoding1 - encoding2)
    
    # Use a threshold to determine if they are the same person
    threshold = 0.5  # You can adjust this threshold
    return distance < threshold

# Paths to your images
image_path1 = 'image/output/output_image.jpg'
image_path2 = 'image/data/rain.jpeg'

# Compare the images
try:
    if compare_faces(image_path1, image_path2):
        print("The images are of the same person.")
    else:
        print("The images are of different people.")
except ValueError as e:
    print(e)
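
By the way, the threshold of 0.5 here is a little stricter than the roughly 0.6 cut-off that dlib suggests for this face recognition model, so while tuning it, it helps to look at the actual distance instead of only the yes/no answer. Here is a minimal sketch (not part of the program above; face_distance and the image paths are just placeholders) that reuses get_face_encoding to print the distance:

import numpy as np

def face_distance(image_path1, image_path2):
    # Euclidean distance between the two 128-D face descriptors; smaller means more similar
    encoding1 = get_face_encoding(image_path1)
    encoding2 = get_face_encoding(image_path2)
    return np.linalg.norm(encoding1 - encoding2)

distance = face_distance('image/data/person_a.jpg', 'image/data/person_b.jpg')
print(f"Distance: {distance:.3f} -> {'same' if distance < 0.5 else 'different'} person")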

Then, I fed some photos of my classmates into the program.

Screenshot 2024-11-29 at 22.13.18.png

Screenshot 2024-11-29 at 22.13.29.png

If I give it two pictures of the same person, the program tells me that the images are of the same person👇

Screenshot 2024-11-29 at 22.18.12.png

Conversely, when the pictures are of different people, the program tells me so👇

Screenshot 2024-11-29 at 22.12.56.png

Then, I added some additional code that lets my laptop's camera take a photo and run the same comparison on it.

And here is my code👇

main.py

import cv2
import dlib
import numpy as np
import TakePhoto

TakePhoto.take_photo()

# Load Dlib's face detection and face recognition models
detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor('./data/shape_predictor_68_face_landmarks.dat')
face_encoder = dlib.face_recognition_model_v1('./data/dlib_face_recognition_resnet_model_v1.dat')

def get_face_encoding(image_path):
    # Load image (cv2.imread returns None if the path is wrong)
    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"Could not read image: {image_path}")
    # Convert to RGB (Dlib expects RGB images)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Detect faces
    faces = detector(img_rgb)
    
    if len(faces) == 0:
        raise ValueError("No face found in the image.")
        
    # Get the first face's landmarks
    landmarks = shape_predictor(img_rgb, faces[0])
    # Get the face encoding
    encoding = face_encoder.compute_face_descriptor(img_rgb, landmarks)
    
    return np.array(encoding)

def compare_faces(image_path1, image_path2):
    # Get face encodings
    encoding1 = get_face_encoding(image_path1)
    encoding2 = get_face_encoding(image_path2)
    
    # Compute the distance between the encodings
    distance = np.linalg.norm(encoding1 - encoding2)
    
    # Use a threshold to determine if they are the same person
    threshold = 0.5  # You can adjust this threshold
    return distance < threshold

# Paths to your images
image_path1 = 'image/output/output_image.jpg'
image_path2 = 'image/data/rain.jpeg'

# Compare the images
try:
    if compare_faces(image_path1, image_path2):
        print("The images are of the same person.")
    else:
        print("The images are of different people.")
except ValueError as e:
    print(e)

TakePhoto.py

import cv2
import numpy as np

def take_photo():
    # Initialize the camera
    cap = cv2.VideoCapture(0)

    # Check if the camera opened successfully
    if not cap.isOpened():
        print("Error: Could not open camera.")
        exit()

    # Read a frame from the camera
    ret, frame = cap.read()

    # Check if the frame was read correctly
    if not ret:
        print("Error: Could not read frame.")
    else:
        # Brightness adjustment I experimented with (currently disabled)
        # brightness_value = 50  # Adjust this value as needed
        # bright_frame = cv2.convertScaleAbs(frame, alpha=1, beta=brightness_value)

        # Display the captured image
        cv2.imshow('Original Image', frame)
        # cv2.imshow('Brightened Image', bright_frame)
        cv2.waitKey(1)  # give the window a moment to actually draw the frame

        # Save the captured image to a file
        cv2.imwrite('./image/output/output_image.jpg', frame)

        # Uncomment to keep the window open until a key is pressed
        # cv2.waitKey(0)

    # Release the camera and close all OpenCV windows
    cap.release()
    cv2.destroyAllWindows()

And here is the result👇

https://www.youtube.com/watch?v=4e5J3Dpy2eo
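
One thing to watch out for when grabbing a single frame like this is that the very first frame from a webcam is often quite dark, because the camera's auto-exposure has not settled yet (which may be why the brightness adjustment above seemed worth trying). Here is a minimal sketch of an alternative take_photo, assuming the same output path, that simply discards a few warm-up frames before saving; the parameter names are just for illustration:

import cv2

def take_photo(output_path='./image/output/output_image.jpg', warmup_frames=10):
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Could not open camera.")
        return

    # Read and throw away a few frames so auto-exposure and white balance can settle
    for _ in range(warmup_frames):
        cap.read()

    # Capture the frame we actually keep
    ret, frame = cap.read()
    if ret:
        cv2.imwrite(output_path, frame)
    else:
        print("Error: Could not read frame.")

    cap.release()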