python怎么画人像_教你如何用Python画出心目中的自己

时间:2025-03-06 20:21:05

原标题:教你如何用Python画出心目中的自己

引言:人脸图像的生成在各个行业有着重要应用,例如刑事调查、人物设计、教育培训等。然而一幅逼真的人脸肖像,对于职业画家也要至少数小时才能绘制出来;对于从未接触过绘画的新手,就更是难如登天了。新手绘制出来的人脸草图往往非常简陋抽象,甚至有不匀称、不完整之处。但如果使用智能人脸画板,无疑是有如神助。

本项目主要来源于中科院和香港城市大学的一项研究DeepFaceDrawing,论文标题是《DeepFaceDrawing: Deep Generation of Face Images from Sketches》

具体效果如下图可见:

实验前的准备

首先,我们使用的 Python 版本是 3.6.5,所用到的模块如下:

PyQt5模块:PyQt5是基于Digia公司强大的图形程式框架Qt5的Python接口,由一组Python模块构成。PyQt5本身拥有超过620个类和6000个函数及方法,可以运行于多个平台,包括:Unix、Windows 和 Mac OS。

opencv模块将用来进行图像处理和生成。

numpy模块用来处理矩阵运算。

Jittor模块是国内清华大学开源的深度学习框架。

_thread是多线程库。

网络模型的定义和训练

首先这个图像合成模块采用了一种利用生成器和鉴别器的GAN结构,从融合的特征图生成真实的人脸图像。鉴别器采用多尺度鉴别方式:对输入进行尺度划分,特征图和生成的图像在三个不同的层次上经过三个不同的处理过程:

(1)权重网络层和损失定义:

def weights_init_normal(m):
    """Initialize one layer's parameters with the DCGAN scheme.

    Conv-like layers get weights drawn from N(0.0, 0.02); BatchNorm
    layers get weights drawn from N(1.0, 0.02) and a zero bias.

    NOTE(review): the scraped text dropped the `if classname.find(...)`
    guards and the receivers before `.gauss_` / `.constant_`.  This body
    is reconstructed following the standard Jittor/DCGAN init idiom and
    assumes `init` (i.e. jittor.init) is imported at file top -- TODO
    confirm against the original DeepFaceDrawing source.
    """
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        init.gauss_(m.weight, 0.0, 0.02)
    elif classname.find("BatchNorm") != -1:
        init.gauss_(m.weight, 1.0, 0.02)
        init.constant_(m.bias, 0.0)

def get_norm_layer(norm_type='instance'):
    """Map a normalization name onto its layer class.

    Args:
        norm_type: 'batch' or 'instance'.

    Returns:
        The normalization layer class (nn.BatchNorm2d / nn.InstanceNorm2d).

    Raises:
        NotImplementedError: for any other norm_type.
    """
    if norm_type == 'batch':
        # NOTE(review): the scraped text dropped the right-hand side here;
        # nn.BatchNorm2d is the standard counterpart of nn.InstanceNorm2d
        # below -- confirm against the original source.
        norm_layer = nn.BatchNorm2d
    elif norm_type == 'instance':
        norm_layer = nn.InstanceNorm2d
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer

class MSELoss:
    """Callable mean-squared-error loss.

    NOTE(review): the scraped line read `from importmse_loss` -- the
    module name was lost.  Reconstructed as jittor.nn.mse_loss; confirm
    against the original DeepFaceDrawing source.
    """

    def __init__(self):
        pass

    def __call__(self, output, target):
        """Return the MSE between `output` and `target`."""
        # Lazy import keeps module load working even without Jittor installed.
        from jittor.nn import mse_loss
        return mse_loss(output, target)

class BCELoss:
    """Callable binary-cross-entropy loss.

    NOTE(review): the scraped line read `from importbce_loss` -- the
    module name was lost.  Reconstructed as jittor.nn.bce_loss; confirm
    against the original DeepFaceDrawing source.
    """

    def __init__(self):
        pass

    def __call__(self, output, target):
        """Return the BCE between `output` and `target`."""
        # Lazy import keeps module load working even without Jittor installed.
        from jittor.nn import bce_loss
        return bce_loss(output, target)

(2)模型特征编解码:

特征匹配模块包含5个译码网络,以由分量流形得到的紧凑(compact)特征向量作为输入,并将其转换为与后续生成的特征图大小相对应的特征向量。

def define_part_encoder(model='mouth', norm='instance', input_nc=1, latent_dim=512):
    """Build the encoder network for one face component.

    Args:
        model: component name; the substrings 'eye', 'mouth', 'nose',
            'face' select the component patch size (128/192/160/512).
        norm: normalization layer name, resolved via get_norm_layer().
        input_nc: number of input channels (1 = grayscale sketch).
        latent_dim: size of the latent feature vector.

    Returns:
        An EncoderGenerator_Res instance for that component.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    image_size = 512
    if 'eye' in model:
        image_size = 128
    elif 'mouth' in model:
        image_size = 192
    elif 'nose' in model:
        image_size = 160
    elif 'face' in model:
        image_size = 512
    else:
        print("Whole Image!!")
    # input longsize 256 to 512*4*4
    net_encoder = EncoderGenerator_Res(norm_layer, image_size, input_nc, latent_dim)
    print("net_encoder of part" + model + " is:", image_size)
    return net_encoder

def define_part_decoder(model='mouth', norm='instance', output_nc=1, latent_dim=512):
    """Build the sketch-image decoder network for one face component.

    Args:
        model: component name; 'eye'/'mouth'/'nose' substrings pick the
            component patch size (128/192/160), anything else uses 512.
        norm: normalization layer name, resolved via get_norm_layer().
        output_nc: number of output channels.
        latent_dim: size of the latent feature vector.

    Returns:
        A DecoderGenerator_image_Res instance for that component.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    image_size = 512
    if 'eye' in model:
        image_size = 128
    elif 'mouth' in model:
        image_size = 192
    elif 'nose' in model:
        image_size = 160
    else:
        print("Whole Image!!")
    # input longsize 256 to 512*4*4
    net_decoder = DecoderGenerator_image_Res(norm_layer, image_size, output_nc, latent_dim)
    print("net_decoder to imageof part " + model + " is:", image_size)
    return net_decoder

def define_feature_decoder(model='mouth', norm='instance', output_nc=1, latent_dim=512):
    """Build the feature-map decoder network for one face component.

    Same size selection as define_part_decoder(), but decodes the latent
    vector into an intermediate feature map rather than an image.

    Args:
        model: component name; 'eye'/'mouth'/'nose' substrings pick the
            component patch size (128/192/160), anything else uses 512.
        norm: normalization layer name, resolved via get_norm_layer().
        output_nc: number of output channels.
        latent_dim: size of the latent feature vector.

    Returns:
        A DecoderGenerator_feature_Res instance for that component.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    image_size = 512
    if 'eye' in model:
        image_size = 128
    elif 'mouth' in model:
        image_size = 192
    elif 'nose' in model:
        image_size = 160
    else:
        print("Whole Image!!")
    # input longsize 256 to 512*4*4
    net_decoder = DecoderGenerator_feature_Res(norm_layer, image_size, output_nc, latent_dim)
    print("net_decoder to imageof part " + model + " is:", image_size)
    return net_decoder

def define_G(input_nc, output_nc, ngf, n_downsample_global=3, n_blocks_global=9, norm='instance'):
    """Build the pix2pixHD-style global generator.

    Args:
        input_nc: number of input channels.
        output_nc: number of output channels.
        ngf: base number of generator filters.
        n_downsample_global: number of downsampling stages.
        n_blocks_global: number of residual blocks.
        norm: normalization layer name, resolved via get_norm_layer().

    Returns:
        A GlobalGenerator instance.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
    return netG

图形界面的定义

在这篇论文中,作者一方面将人脸关键区域(双眼、鼻、嘴和其他区域)作为面元,学习其特征嵌入,将输入草图的对应部分送到由数据库样本中面元的特征向量构成的流形空间进行校准。另一方面,参考 pix2pixHD [5]的网络模型设计,使用 conditional GAN 来学习从编码的面元特征到真实图像的映射生成结果。

(1)鼠标绘制函数的定义:

class OutputGraphicsScene(QGraphicsScene):

# NOTE(review): this class body was garbled by web extraction -- indentation
# is lost and many statements dropped their assignment target or call
# receiver (e.g. `(0,0,(),())`, bare `= 0`, `= []`, `()`).  The comments
# below record the apparent intent only; the exact code must be recovered
# from the original DeepFaceDrawing source before this can run.

# Initialize drawing state: mouse flags, last stroke point, a white
# 512x512 RGB canvas (ori_img), the edit history, and the brush flags.
def __init__(self, parent=None):

QGraphicsScene.__init__(self, parent)

# = mode_list

self.mouse_clicked = False

self.prev_pt = None

# truncated call -- receiver lost in extraction (looks like a scene-rect setup)
(0,0,(),())

# self.masked_image = None

# truncated assignment -- target name lost in extraction
= 0

# save the history of edit

# truncated assignment -- presumably the edit-history list (cf. image_list)
= []

# white canvas; the dropped call is presumably np.ones((512,512,3), ...) -- confirm
self.ori_img = ((512,512, 3),dtype=np.uint8)*255

self.mask_put = 1 # 1 marks brush use while 0 marks erase

= False

# (0 ,0)

= True

self.convert_on = False

# Reset the scene back to a blank white 512x512 canvas.
def reset(self):

= False

self.ori_img = ((512,512, 3),dtype=np.uint8)*255

(True)

self.prev_pt = None

# Replace the current sketch with `sketch_mat` and restart the undo history.
def setSketchImag(self,sketch_mat, mouse_up=False):

self.ori_img =sketch_mat.copy()

self.image_list = []

self.image_list.append(self.ori_img.copy() )

# Start a stroke: in erase mode just record the click; in brush mode draw.
def mousePressEvent(self,event):

if not self.mask_put == 1:

self.mouse_clicked =True

self.prev_pt = None

else:

self.make_sketch(())

# Eraser: draw each segment onto color_img and zero it on mask_img.
# The dropped receivers are presumably cv2.line calls -- confirm.
def make_sketch_Eraser(self,pts):

if len(pts)>0:

for pt in pts:

(self.color_img,pt['prev'],pt['curr'],self.paint_color,self.paint_size)

(self.mask_img,pt['prev'],pt['curr'],(0,0,0),self.paint_size )

()

# Brush: draw each segment directly onto the sketch image (cv2.line?).
def modify_sketch(self, pts):

if len(pts)>0:

for pt in pts:

(self.ori_img,pt['prev'],pt['curr'],self.paint_color,self.paint_size)

()

# Remember the current stroke color.
def get_stk_color(self, color):

self.stk_color = color

# Forget the last stroke point so the next segment starts fresh.
def erase_prev_pt(self):

self.prev_pt = None

# Remove every QGraphicsItem currently in the scene.
def reset_items(self):

for i inrange(len(())):

item = ()[0]

(item)

# Undo: step back to the previous snapshot kept in image_list.
def undo(self):

iflen(self.image_list)>1:

num =len(self.image_list)-2

self.ori_img =self.image_list[num].copy()

self.image_list.pop(num+1)

(True)

# Composite: keep ori_img where the mask is 0, color_img where it is 1.
def getImage(self):

returnself.ori_img*(1-self.mask_img) +self.color_img*self.mask_img

# Push the current sketch into the scene pixmap via a QImage round-trip.
defupdatePixmap(self,mouse_up=False):

sketch = self.ori_img

qim = QImage(,[1], [0], QImage.Format_RGB888)

if :

self.reset_items()

=((qim))

= False

else:

((qim))

# Background refresh loop: while real-time convert is on, periodically
# redraw the board and log the per-iteration elapsed time.
def fresh_board(self):

print('======================================================')

while(True):

if(self.convert_on):

print('======================================================')

(100)

iter_start_time =()

()

print('TimeSketch:',() - iter_start_time)

(2)GUI界面:其核心思路并非直接用输入草图作为网络生成条件,而是将人脸进行分块操作后利用数据驱动的思想对抽象的草图特征空间进行隐式建模,并在这个流形空间中找到输入草图特征的近邻组合来重构特征,进而合成人脸图像。

class WindowUI(,Ui_SketchGUI):

# NOTE(review): garbled by web extraction -- the first base class before the
# comma was dropped (presumably a Qt widget class such as QMainWindow), and
# many statements lost their receivers (bare `()`, `( | )`, `.()` calls).
# Comments record the apparent intent only; restore the exact code from the
# original DeepFaceDrawing source before running.

# Build the main window: input/output graphics scenes, brush and eraser
# state, and the real-time conversion flag.
def __init__(self):

super(WindowUI,self).__init__()

(self)

()

self._translate =

self.output_img = None

self.brush_size =()

self.eraser_size =()

= [0,1,0] # 0 marks the eraser, 1 marks the brush

self.Modify_modes = [0,1,0] # 0 marks the eraser, 1 marks the brush

self.output_scene =OutputGraphicsScene()

(self.output_scene)

( | )

()

()

self.output_view =QGraphicsView(self.output_scene)

#self.output_view.fitInView(self.output_scene.updatePixmap())

self.input_scene =InputGraphicsScene(, self.brush_size,self.output_scene)

(self.input_scene)

( | )

()

()

self.input_scene.convert_on= self.RealTime_checkBox.isChecked()

self.output_scene.convert_on= self.RealTime_checkBox.isChecked()

self.BrushNum_label.setText(self._translate("SketchGUI",str(self.brush_size)))

self.EraserNum_label.setText(self._translate("SketchGUI",str(self.eraser_size)))

self.start_time =()

# self.

# try:

# # thread.start_new_thread(self.output_scene.fresh_board,())

# thread.start_new_thread(self.input_scene.thread_shadow,())

# except:

# print("Error: unable to startthread")

# print("Finish")

# Wire every widget signal to its handler.  The dropped `.()` receivers
# are presumably `.clicked.connect(...)` / `.valueChanged.connect(...)`.
def setEvents(self):

self.Undo_Button.()

self.Brush_Button.(self.brush_mode)

(self.brush_change)

self.Clear_Button.()

self.Eraser_Button.(self.eraser_mode)

(self.eraser_change)

self.Save_Button.()

#weight bar

self.part0_Slider.()

self.part1_Slider.()

self.part2_Slider.()

self.part3_Slider.()

self.part4_Slider.()

self.part5_Slider.()

self.Load_Button.()

self.Convert_Sketch.()

self.RealTime_checkBox.(self.convert_on)

self.Shadow_checkBox.(self.shadow_on)

self.Female_Button.(self.choose_Gender)

self.Man_Button.(self.choose_Gender)

()

# One-hot select the active tool (0 = eraser, 1 = brush); the target
# list name was dropped in extraction.
def mode_select(self, mode):

for i inrange(len()):

[i] = 0

[mode] = 1

# Switch to brush mode and show it in the status bar.
def brush_mode(self):

self.mode_select(1)

self.brush_change()

().showMessage("Brush")

# Switch to eraser mode and show it in the status bar.
def eraser_mode(self):

self.mode_select(0)

self.eraser_change()

().showMessage("Eraser")

# Undo the last stroke on both scenes.
def undo(self):

self.input_scene.undo()

self.output_scene.undo()

# Read the brush-size control, update its label, and (in brush mode)
# push the size and black paint color into the input scene.
def brush_change(self):

self.brush_size =()

self.BrushNum_label.setText(self._translate("SketchGUI",str(self.brush_size)))

if [1]:

self.input_scene.paint_size = self.brush_size

self.input_scene.paint_color = (0,0,0)

().showMessage("Change Brush Size in ",self.brush_size)

# Same as brush_change but for the eraser (white paint color).
def eraser_change(self):

self.eraser_size =()

self.EraserNum_label.setText(self._translate("SketchGUI",str(self.eraser_size)))

if [0]:

print( self.eraser_size)

self.input_scene.paint_size = self.eraser_size

self.input_scene.paint_color = (1,1,1)

().showMessage("Change Eraser Size in ",self.eraser_size)

# Push the per-part blending weights (slider values scaled to 0..1)
# into the input scene and refresh the shadow guidance.
def changePart(self):

self.input_scene.part_weight['eye1'] = self.part0_Slider.value()/100

self.input_scene.part_weight['eye2']= self.part1_Slider.value()/100

self.input_scene.part_weight['nose'] = self.part2_Slider.value()/100

self.input_scene.part_weight['mouth'] = self.part3_Slider.value()/100

# NOTE(review): the dict key for the fifth part was lost in extraction
# (likely the remainder/background region) -- confirm against the repo.
self.input_scene.part_weight[''] = self.part4_Slider.value()/100

self.input_scene.start_Shadow()

#self.input_scene.updatePixmap()

# Drive all five part sliders from the master (part5) slider.
def changAllPart(self):

value =self.part5_Slider.value()

self.part0_Slider.setProperty("value", value)

self.part1_Slider.setProperty("value", value)

self.part2_Slider.setProperty("value", value)

self.part3_Slider.setProperty("value", value)

self.part4_Slider.setProperty("value", value)

()

# Clear both drawing boards and restart the timer.
def clear(self):

self.input_scene.reset()

self.output_scene.reset()

self.start_time =()

self.input_scene.start_Shadow()

().showMessage("Clear Drawing Board")

# Run one sketch-to-image conversion and refresh the output pixmap.
def convert(self):

().showMessage("Press Convert")

self.input_scene.convert_RGB()

self.output_scene.updatePixmap()

# Load a sketch image from disk, resize it to 512x512, and hand it to
# the input scene.  The dropped calls are presumably QFileDialog /
# cv2.imread / cv2.resize / cv2.cvtColor -- confirm.
def open(self):

fileName, _ =(self, "Open File",

(),"Images Files (*.*)") #jpg;*.jpeg;*.png

if fileName:

image =QPixmap(fileName)

mat_img =(fileName)

mat_img = (mat_img,(512, 512), interpolation=cv2.INTER_CUBIC)

mat_img =(mat_img, cv2.COLOR_RGB2BGR)

if ():

(self, "Image Viewer",

"Cannotload %s." % fileName)

return

#('open',mat_img)

self.input_scene.start_Shadow()

self.input_scene.setSketchImag(mat_img)

# Save the current sketch and the generated image under a timestamped
# directory in ./saveImage/ (dropped calls look like os.makedirs and
# cv2.imwrite -- confirm).
def saveFile(self):

cur_time =strftime("%Y-%m-%d-%H-%M-%S", gmtime())

file_dir ='./saveImage/'+cur_time

if (file_dir) :

(file_dir)

(file_dir+'/',self.input_scene.sketch_img*255)

(file_dir+'/',(self.output_scene.ori_img,cv2.COLOR_BGR2RGB))

print(file_dir)

# Propagate the real-time-convert checkbox state to both scenes.
def convert_on(self):

# ifself.RealTime_checkBox.isCheched():

print('self.RealTime_checkBox',self.input_scene.convert_on)

self.input_scene.convert_on= self.RealTime_checkBox.isChecked()

self.output_scene.convert_on= self.RealTime_checkBox.isChecked()

# Toggle the shadow (guidance) overlay on the input scene.
def shadow_on(self):

_translate =

self.input_scene.shadow_on =not self.input_scene.shadow_on

self.input_scene.updatePixmap()

ifself.input_scene.shadow_on:

().showMessage("Shadow ON")

else:

().showMessage("Shadow OFF")

# Select the gender prior used by the generator (1 = female, 0 = male)
# and refresh the shadow guidance.
def choose_Gender(self):

ifself.Female_Button.isChecked():

self.input_scene.sex = 1

else:

self.input_scene.sex = 0

self.input_scene.start_Shadow()

总结

这里给出模型的体验网址:

:3000/index_621.html

该方法核心亮点之一,便是以多通道特征图作为中间结果来改善信息流。从本质上看,这是将输入草图作为软约束来替代传统方法中的硬约束,因此能够用粗糙甚至不完整的草图来生成高质量的完整人脸图像。

反思DeepFaceDrawing

1)画不出丑脸:

从图中可以看出,即使给出丑陋的草图,输出的也会是平均来说漂亮的人脸,这大概是因为所用的训练数据集都是名人,平均“颜值”较高,因此神经网络学到了一种漂亮的平均;这能算是一种在“颜值上的”数据不平衡问题吗。

2)安全问题

比如人脸支付场景中,可能存在利用该项技术盗刷的问题。随着人脸活体检测技术的发展,这种隐患应该能得以有效避免。

3)技术攻击性

相比于Deepfake,本文的DeepFaceDrawing应该算是相对无害的。

4)商业价值

如论文作者所说,这项技术在犯罪侦查、人物设计、教育培训等方面都可以有所作为。期待有一天这项技术更加通用,这样一来其商业价值会更大。

[声明]本文版权归原作者所有,内容为作者个人观点,转载目的在于传递更多信息,如涉及作品内容、版权等问题,可联系本站删除,谢谢。

更多内容可关注微信公众号:成都CDA数据分析师。

责任编辑: