Image face swap in Python: source code + demo (deepfakes-style face swapping)

Copyright notice: this is an original article by the author. When reposting, please include a link to the original and this notice.
Link to this article: https://www.weijc.cn/jdetail/1583809282896

Online tool demo here: 简捷工具网: http://www.shulijp.com/image/swapface/swapface.html

Full-face swap (align the two frontal faces with dlib's 68 landmarks, warp the second face onto the first, colour-correct it, and blend with a feathered mask):

import cv2
import dlib
import sys
import numpy as np
from PIL import Image

class TooManyFaces(Exception):
    pass

class NoFaces(Exception):
    pass

def get_landmarks(im):
    rects = detector(im, 1)
    if len(rects) > 1:
        raise TooManyFaces
    if len(rects) == 0:
        raise NoFaces
    return np.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])

def transformation_from_points(points1, points2):
    # Solve an orthogonal Procrustes problem: find the scale, rotation and
    # translation that best map points1 onto points2.
    points1 = points1.astype(np.float64)
    points2 = points2.astype(np.float64)

    # Centre both point sets on their means.
    c1 = np.mean(points1, axis=0)
    c2 = np.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2

    # Normalise the overall scale of each point set.
    s1 = np.std(points1)
    s2 = np.std(points2)
    points1 /= s1
    points2 /= s2

    # The rotation comes from the SVD of the correlation matrix.
    U, S, Vt = np.linalg.svd(points1.T * points2)
    R = (U * Vt).T

    # Assemble the full 3x3 affine matrix [sR | t; 0 0 1].
    return np.vstack([np.hstack(((s2 / s1) * R,
                                 c2.T - (s2 / s1) * R * c1.T)),
                      np.matrix([0., 0., 1.])])


def get_eye(img_src, record):
    # Detect the face in img_src and store its full 68-point landmark matrix in record.
    img = cv2.imread(img_src)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(PREDICTOR_PATH)

    faces = detector(gray)
    for face in faces:
        landmarks = predictor(gray, face)
        face_matrix = np.matrix([[p.x, p.y] for p in landmarks.parts()])
        record[img_src] = face_matrix

def draw_convex_hull(im, points, color):
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)

def get_face_mask(im, landmarks):
    # Build a feathered (Gaussian-blurred) float mask covering the overlay regions.
    im = np.zeros(im.shape[:2], dtype=np.float64)

    for group in OVERLAY_POINTS:
        draw_convex_hull(im,
                         landmarks[group],
                         color=1)

    im = np.array([im, im, im]).transpose((1, 2, 0))

    im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)

    return im

def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im

def correct_colours(im1, im2, landmarks1, mhd):
    blur_amount = mhd * np.linalg.norm(
                              np.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
                              np.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)

    # Avoid divide-by-zero errors.
    im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)

    return (im2.astype(np.float64) * im1_blur.astype(np.float64) /
                                                im2_blur.astype(np.float64))

def exchange(p1, p2, record):
    # Note: this helper is not called by changeface() below; it expects
    # pre-cropped "*_eye_resized.jpg" images produced elsewhere.
    im1 = Image.open(p1)
    im1_eye = Image.open(p2+"_eye_resized.jpg")
    im1.paste(im1_eye,(record[p1][0],record[p1][1]))
    im1.save(p1+"_result.jpg")

    im2 = Image.open(p2)
    im2_eye = Image.open(p1+"_eye_resized.jpg")
    im2.paste(im2_eye,(record[p2][0],record[p2][1]))
    im2.save(p2+"_result.jpg")

def changeface(src1,src2,mhd):
    try:
        record = {}
        get_eye(src1, record)
        get_eye(src2, record)

        M = transformation_from_points(record[src1][ALIGN_POINTS],
                                       record[src2][ALIGN_POINTS])
        im1 = cv2.imread(src1)
        im2 = cv2.imread(src2)
        mask = get_face_mask(im2, record[src2])
        warped_mask = warp_im(mask, M, im1.shape)
        combined_mask = np.max([get_face_mask(im1, record[src1]), warped_mask],
                               axis=0)
        warped_im2 = warp_im(im2, M, im1.shape)
        warped_corrected_im2 = correct_colours(im1, warped_im2, record[src1],mhd)

        output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
        src1 = "".join(src1.split('.')[:-1]) + "_swap.jpg"
        cv2.imwrite(src1, output_im)
        return src1
    except Exception:
        print('Full-face swap: no face detected')
        return ''

# Absolute path to dlib's 68-point landmark model (adjust to your setup)
PREDICTOR_PATH = "/data/python/shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11

FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))

# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
                               RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)

# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
    LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
    NOSE_POINTS + MOUTH_POINTS,
]

# Amount of blur to use during colour correction, as a fraction of the pupillary distance.
# mhd = 0.6
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
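
A minimal usage sketch (the image paths and the 0.6 blur fraction are illustrative assumptions, not part of the original script):

if __name__ == '__main__':
    # Hypothetical example paths; replace with your own portrait images.
    result = changeface('face1.jpg', 'face2.jpg', 0.6)
    if result:
        print('Swapped image written to', result)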

Swap only the eyes and mouth (crop each region, resize it to fit the other face, and blend it in with cv2.seamlessClone):

import cv2
import dlib
import numpy as np
from PIL import Image


# Swap the eyes and mouth between two faces
def get_eye(img_src, record, record1, record2):

    img = cv2.imread(img_src)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("/data/python/shape_predictor_68_face_landmarks.dat")
    # predictor = dlib.shape_predictor("./shape_predictor_68_face_landmarks.dat")

    faces = detector(gray)

    for face in faces:
        landmarks = predictor(gray, face)

        # Right-eye region: top-left corner (x, y), bottom-right corner (x0, y0)
        x, y, x0, y0 = landmarks.part(42).x - 5, min(landmarks.part(43).y, landmarks.part(44).y) - 5, landmarks.part(45).x + 5, max(landmarks.part(46).y, landmarks.part(47).y) + 5
        # Left-eye region: top-left corner (x1, y1), bottom-right corner (x2, y2)
        x1,y1,x2,y2 = landmarks.part(36).x-5,min(landmarks.part(37).y,landmarks.part(38).y)-5,landmarks.part(39).x+5,max(landmarks.part(40).y,landmarks.part(41).y)+5

        # Mouth region: top-left corner (x3, y3), bottom-right corner (x4, y4)
        x3, y3, x4, y4 = landmarks.part(48).x-5, min(landmarks.part(50).y,landmarks.part(52).y)-5, landmarks.part(54).x+5,landmarks.part(57).y +5

        cut = img[y:y0,x:x0]
        cut1 = img[y1:y2,x1:x2]
        cut2 = img[y3:y4,x3:x4]

        record[img_src] = (x, y, x0, y0)
        record1[img_src] = (x1,y1,x2,y2)
        record2[img_src] = (x3,y3,x4,y4)

        return cut,cut1,cut2

def changeface(p1, p2):
    try:
        record = {}
        record1 = {}
        record2 = {}
        eye_right, eye_left, mouth1 = get_eye(p1, record, record1, record2)
        eye_right2, eye_left2, mouth2 = get_eye(p2, record, record1, record2)

        eye_right_resized = cv2.resize(eye_right,(record[p2][2]-record[p2][0],record[p2][3]-record[p2][1]),interpolation=cv2.INTER_AREA)
        eye_left_resized = cv2.resize(eye_left, (record1[p2][2] - record1[p2][0], record1[p2][3] - record1[p2][1]),interpolation=cv2.INTER_AREA)

        eye_right2_resized = cv2.resize(eye_right2,(record[p1][2]-record[p1][0],record[p1][3]-record[p1][1]),interpolation=cv2.INTER_AREA)
        eye_left2_resized = cv2.resize(eye_left2, (record1[p1][2] - record1[p1][0], record1[p1][3] - record1[p1][1]),interpolation=cv2.INTER_AREA)

        mouth1_resized = cv2.resize(mouth1,(record2[p2][2]-record2[p2][0],record2[p2][3]-record2[p2][1]),interpolation=cv2.INTER_AREA)
        mouth2_resized = cv2.resize(mouth2, (record2[p1][2] - record2[p1][0], record2[p1][3] - record2[p1][1]),interpolation=cv2.INTER_AREA)

        im = cv2.imread(p1)
        obj = eye_right2_resized
        # Create an all white mask
        mask = 255 * np.ones(obj.shape, obj.dtype)
        center = (int((record[p1][0] + record[p1][2]) / 2), int((record[p1][1] + record[p1][3]) / 2))
        # Seamlessly clone src into dst and put the results in output
        normal_clone = cv2.seamlessClone(obj, im, mask, center, cv2.NORMAL_CLONE)

        im1 = normal_clone
        obj1 = eye_left2_resized
        # Create an all white mask
        mask1 = 255 * np.ones(obj1.shape, obj1.dtype)
        center1 = (int((record1[p1][0] + record1[p1][2]) / 2), int((record1[p1][1] + record1[p1][3]) / 2))
        # Seamlessly clone src into dst and put the results in output
        normal_clone1 = cv2.seamlessClone(obj1, im1, mask1, center1, cv2.NORMAL_CLONE)

        im2 = normal_clone1
        obj2 = mouth2_resized
        mask2 = 255 * np.ones(obj2.shape, obj2.dtype)
        center2 = (int((record2[p1][0] + record2[p1][2]) / 2), int((record2[p1][1] + record2[p1][3]) / 2))
        normal_clone2 = cv2.seamlessClone(obj2, im2, mask2, center2, cv2.NORMAL_CLONE)
        p1 = "".join(p1.split('.')[:-1]) + "_ted.jpg"
        cv2.imwrite(p1, normal_clone2)
        return p1
    except Exception:
        print('Eye/mouth swap: no face detected')
        return ''
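
A minimal usage sketch for this version (the file names are illustrative assumptions; the result keeps face 1 but with face 2's eyes and mouth blended in):

if __name__ == '__main__':
    # Hypothetical example paths; replace with your own portrait images.
    out = changeface('face1.jpg', 'face2.jpg')
    if out:
        print('Result written to', out)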

For the installation tutorial, see:

https://www.weijc.cn/jdetail/1583809728367
