This article walks through a Python implementation of face recognition verification. The complete example code is included below and should be a useful reference for study or work.
Straight to the code. The example is adapted from https://github.com/caibojian/face_login. At a high level it registers a user from webcam frames, stores 128-dimensional face encodings in MongoDB, trains a small Keras classifier on them, and then verifies a login by classifying five freshly captured frames. The recognition accuracy is not great: verification sometimes still succeeds even with half the face covered.
# -*- coding: utf-8 -*-
# __author__ = "maple"
"""
    ┏┓      ┏┓
  ┏┛┻━━━━━━┛┻┓
  ┃     ?    ┃
  ┃  ┳┛  ┗┳  ┃
  ┃     ┻    ┃
  ┗━┓      ┏━┛
    ┃      ┗━━━┓
    ┃  Divine beast, bless us:  ┣┓
    ┃  no bugs, ever!           ┏┛
    ┗┓┓┏━━━┳┓┏┛
     ┃┫┫   ┃┫┫
     ┗┻┛   ┗┻┛
"""
import base64
import cv2
import time
from io import BytesIO

import face_recognition
import numpy as np
import tensorflow as tf
from tensorflow import keras
from PIL import Image
from pymongo import MongoClient

# MongoDB connection
conn = MongoClient('mongodb://root:123@localhost:27017/')
db = conn.myface              # myface database (created automatically if missing)
user_face = db.user_face      # registered users collection (created automatically if missing)
face_images = db.face_images  # stored face encodings collection

lables = []
datas = []
INPUT_NODE = 128
LATER1_NODE = 200
OUTPUT_NODE = 0
TRAIN_DATA_SIZE = 0
TEST_DATA_SIZE = 0


def generateds():
    # Load encodings and labels from MongoDB; train and test sets are the same data here
    get_out_put_node()
    train_x, train_y = np.array(datas), np.array(lables)
    test_x, test_y = np.array(datas), np.array(lables)
    return train_x, train_y, test_x, test_y


def get_out_put_node():
    for item in face_images.find():
        lables.append(item['user_id'])
        datas.append(item['face_encoding'])
    OUTPUT_NODE = len(set(lables))
    TRAIN_DATA_SIZE = len(lables)
    TEST_DATA_SIZE = len(lables)
    return OUTPUT_NODE, TRAIN_DATA_SIZE, TEST_DATA_SIZE


# Verify faces in an image against the trained model
def predict_image(image):
    model = tf.keras.models.load_model('face_model.h5', compile=False)
    face_encode = face_recognition.face_encodings(image)
    result = []
    for j in range(len(face_encode)):
        predictions1 = model.predict(np.array(face_encode[j]).reshape(1, 128))
        print(predictions1)
        if np.max(predictions1[0]) > 0.90:
            print(np.argmax(predictions1[0]).dtype)
            pred_user = user_face.find_one({'id': int(np.argmax(predictions1[0]))})
            print('Face %d is %s' % (j + 1, pred_user['user_name']))
            result.append(pred_user['user_name'])
    return result


# Save a face encoding to MongoDB
def save_face(pic_path, uid):
    image = face_recognition.load_image_file(pic_path)
    face_encode = face_recognition.face_encodings(image)
    if len(face_encode) == 1:
        print(face_encode[0].shape)
        face_image = {
            'user_id': uid,
            'face_encoding': face_encode[0].tolist()
        }
        face_images.insert_one(face_image)


# Train the classifier on the stored encodings
def train_face():
    train_x, train_y, test_x, test_y = generateds()
    dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    dataset = dataset.batch(32)
    dataset = dataset.repeat()
    OUTPUT_NODE, TRAIN_DATA_SIZE, TEST_DATA_SIZE = get_out_put_node()
    model = keras.Sequential([
        keras.layers.Dense(128, activation=tf.nn.relu),
        keras.layers.Dense(128, activation=tf.nn.relu),
        keras.layers.Dense(OUTPUT_NODE, activation=tf.nn.softmax)
    ])
    model.compile(optimizer=tf.compat.v1.train.AdamOptimizer(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    steps_per_epoch = 30
    if steps_per_epoch > len(train_x):
        steps_per_epoch = len(train_x)
    model.fit(dataset, epochs=10, steps_per_epoch=steps_per_epoch)

    model.save('face_model.h5')


def register_face(user):
    # Cursor.count() requires pymongo 3.x; on pymongo 4+ use count_documents() instead
    if user_face.find({"user_name": user}).count() > 0:
        print("User already exists")
        return
    video_capture = cv2.VideoCapture(0)
    # sort() orders by the given field: 1 for ascending, -1 for descending
    finds = user_face.find().sort([("id", -1)]).limit(1)
    uid = 0
    if finds.count() > 0:
        uid = finds[0]['id'] + 1
        print(uid)
    user_info = {
        'id': uid,
        'user_name': user,
        'create_time': time.time(),
        'update_time': time.time()
    }
    user_face.insert_one(user_info)

    while True:
        # Grab a frame from the webcam
        ret, frame = video_capture.read()
        # Show it in a window
        cv2.imshow('Video', frame)
        # Press 'q' to save the current frame as five pictures and register the encodings
        if cv2.waitKey(1) & 0xFF == ord('q'):
            for i in range(1, 6):
                cv2.imwrite('Myface{}.jpg'.format(i), frame)
                with open('Myface{}.jpg'.format(i), "rb") as f:
                    img = f.read()
                    img_data = BytesIO(img)
                    im = Image.open(img_data)
                    im = im.convert('RGB')
                    imgArray = np.array(im)
                    faces = face_recognition.face_locations(imgArray)
                    save_face('Myface{}.jpg'.format(i), uid)
            break

    train_face()
    video_capture.release()
    cv2.destroyAllWindows()


def rec_face():
    video_capture = cv2.VideoCapture(0)
    while True:
        # Grab a frame from the webcam
        ret, frame = video_capture.read()
        # Show it in a window
        cv2.imshow('Video', frame)
        # Press 'q' to save five pictures for verification
        if cv2.waitKey(1) & 0xFF == ord('q'):
            for i in range(1, 6):
                cv2.imwrite('recface{}.jpg'.format(i), frame)
            break

    res = []
    for i in range(1, 6):
        with open('recface{}.jpg'.format(i), "rb") as f:
            img = f.read()
            img_data = BytesIO(img)
            im = Image.open(img_data)
            im = im.convert('RGB')
            imgArray = np.array(im)
            predict = predict_image(imgArray)
            if predict:
                res.extend(predict)

    b = set(res)  # verification passes only if all recognized names agree
    if len(b) == 1 and len(res) >= 3:
        print("Verification succeeded")
    else:
        print("Verification failed")


if __name__ == '__main__':
    register_face("maple")
    rec_face()
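As noted at the top, the softmax threshold on its own is fairly permissive: a partially covered face can still score above 0.90 for the right class. One way to tighten verification is to also compare the captured encoding directly against the encodings already stored for that user and require a small Euclidean distance. The sketch below is not part of the original repository; it reuses the face_images collection handle from the script above, and the 0.5 tolerance is an assumed value, chosen slightly stricter than face_recognition's 0.6 default.

# Distance-based double check (a sketch, not from the original repo).
# Assumes it lives in the same module as the script above so the
# `face_images` collection handle and imports are available; the 0.5
# tolerance is an assumption, not a value from the article.
def verify_by_distance(image_array, uid, tolerance=0.5):
    encodings = face_recognition.face_encodings(image_array)
    if len(encodings) != 1:
        # Reject frames with zero or multiple faces
        return False
    stored = [np.array(doc['face_encoding'])
              for doc in face_images.find({'user_id': uid})]
    if not stored:
        return False
    # Euclidean distances between the live encoding and each stored encoding
    distances = face_recognition.face_distance(stored, encodings[0])
    return float(np.min(distances)) <= tolerance

In rec_face(), such a check could be combined with predict_image() so that a frame only counts toward the three-of-five vote when both the classifier and the distance test agree.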
That is all for this article. Hopefully it helps with your study or work, and please continue to support 服務(wù)器之家.
Original article: https://www.cnblogs.com/angelyan/p/12113773.html