Commit: initialize
rushuai.liu committed Apr 20, 2021
1 parent 322d61c commit 5515e02
Showing 14 changed files with 1,274 additions and 1 deletion.
3 changes: 3 additions & 0 deletions .gitignore
@@ -0,0 +1,3 @@
__pycache__
face_train_ms1mv2.txt
output
21 changes: 20 additions & 1 deletion README.md
@@ -1,3 +1,22 @@
# A Simple Explicit Quality Network for Face Recognition

-Comming soon
## Training Data

1. Download [MS1Mv2](https://github.com/deepinsight/insightface/wiki/Dataset-Zoo)
2. Extract image files by [rec2image.py](https://github.com/deepinsight/insightface/blob/master/recognition/common/rec2image.py)
3. Generate the training file list
```
cd dataset
python generate_file_list.py
```
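Each line of the generated face_train_ms1mv2.txt pairs an absolute image path with a zero-based identity label, separated by a semicolon. An illustrative excerpt (the directory and file names below are hypothetical):
```
/m2-data/rushuai.liu/faceQuality/ms1mv2/id00000/0001.jpg;0
/m2-data/rushuai.liu/faceQuality/ms1mv2/id00000/0002.jpg;0
/m2-data/rushuai.liu/faceQuality/ms1mv2/id00001/0001.jpg;1
```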

## Training
1. Set config.py for stage 1, then run **python train_feature.py**
2. Set config.py for stage 2, then run **python train_quality.py**
3. Set config.py for stage 3, then run **python train_feature.py** again
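The three stages presumably alternate which part of the network is trained: config.py exposes FIXED_BACKBONE_FEATURE, PRETRAINED_BACKBONE, and PRETRAINED_QUALITY for this. A sketch of plausible per-stage settings follows; this is an assumption, since the commit does not include the training scripts that consume them:
```
# Stage 1: train the backbone and head from scratch (assumed).
FIXED_BACKBONE_FEATURE = False
# Stage 2: freeze the backbone and train only the quality branch (assumed).
FIXED_BACKBONE_FEATURE = True
PRETRAINED_BACKBONE = 'backbone_resume.pth'
# Stage 3: fine-tune the recognition loss weighted by quality (assumed).
FIXED_BACKBONE_FEATURE = False
PRETRAINED_QUALITY = 'qulity_resume.pth'
```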
31 changes: 31 additions & 0 deletions config.py
@@ -0,0 +1,31 @@
import torch
import os

class Config:
    MODEL_ROOT = 'output/deepcam_model'
    LOG_ROOT = 'output/deepcam_log'
    BACKBONE_RESUME_ROOT = './backbone_resume.pth'
    HEAD_RESUME_ROOT = './head_resume.pth'
    TRAIN_FILES = './dataset/face_train_ms1mv2.txt'

    RGB_MEAN = [0.5, 0.5, 0.5]
    RGB_STD = [0.5, 0.5, 0.5]
    EMBEDDING_SIZE = 512
    BATCH_SIZE = 900
    DROP_LAST = True
    BACKBONE_LR = 0.05
    QUALITY_LR = 0.01
    NUM_EPOCH = 90
    WEIGHT_DECAY = 5e-4
    MOMENTUM = 0.9

    HEAD_GPUS = [0]
    BACKBONE_GPUS = [1, 2, 3]

    PRETRAINED_BACKBONE = 'backbone_resume.pth'
    PRETRAINED_QUALITY = 'qulity_resume.pth'

    NUM_EPOCH_WARM_UP = 1
    FIXED_BACKBONE_FEATURE = False

config = Config()
58 changes: 58 additions & 0 deletions dataset/dataset.py
@@ -0,0 +1,58 @@
from torch.utils import data
from PIL import Image
import random
import os
import os.path
import sys

import cv2
import numpy as np

def random_compress(img):
    # JPEG re-compression augmentation with a random quality in [40, 90].
    rand_num = random.randint(40, 90)
    img_encode = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), rand_num])
    data_encode = np.array(img_encode[1])
    str_encode = data_encode.tobytes()
    nparr = np.frombuffer(str_encode, np.uint8)
    img_decode = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return img_decode

def pil_loader(path):
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')


def cv2_loader(path):
    # Randomly downscale half of the images to simulate low-quality inputs.
    img1 = cv2.imread(path)
    if np.random.random() < 0.5:
        size = np.random.choice([60, 80, 100])
        img1 = cv2.resize(img1, (size, size))
    img2 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img2)
    return img

class ImageFolder(data.Dataset):
    def __init__(self, trainList, transform=None, loader=None):
        super(ImageFolder, self).__init__()
        self.transform = transform
        if loader is None:
            self.loader = cv2_loader
        else:
            self.loader = loader
        with open(trainList) as f:
            self.samples = f.readlines()
        # Each line is 'path;label'; labels are contiguous, so the last
        # line's label + 1 gives the number of identities.
        self.classes = int(self.samples[-1].split(';')[1]) + 1

    def __getitem__(self, index):
        path, target = self.samples[index].split(';')
        target = int(target)
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)

        return sample, target

    def __len__(self):
        return len(self.samples)
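A minimal usage sketch for the ImageFolder class above, assuming torchvision is installed and the file list from generate_file_list.py already exists. The normalization values and batch settings mirror config.py, the 112x112 size matches the model input, and the horizontal flip is an assumed augmentation choice:

```python
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

transform = transforms.Compose([
    transforms.Resize((112, 112)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
dataset = ImageFolder('./dataset/face_train_ms1mv2.txt', transform=transform)
loader = DataLoader(dataset, batch_size=900, shuffle=True, drop_last=True)
images, labels = next(iter(loader))  # images: (900, 3, 112, 112)
```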
29 changes: 29 additions & 0 deletions dataset/generate_file_list.py
@@ -0,0 +1,29 @@
import os

DATA_DIR = '/m2-data/rushuai.liu/faceQuality'
train_data_dirs = [
    'ms1mv2',
]

out_file = open('face_train_ms1mv2.txt', 'w')

person_count = 0

for root_dir in train_data_dirs:
    root_dir = os.path.join(DATA_DIR, root_dir)
    if not os.path.isdir(root_dir):
        continue
    for person_dir in os.listdir(root_dir):
        person_dir = os.path.join(root_dir, person_dir)
        count = 0
        for filename in os.listdir(person_dir):
            filename = os.path.join(person_dir, filename)
            if filename.endswith(('.png', '.jpg', '.bmp')) and os.path.isfile(filename):
                count += 1
                print(os.path.abspath(filename) + ';' + str(person_count), file=out_file)

        # Only advance the identity label if the directory contained images.
        if count > 0:
            person_count += 1

out_file.close()
19 changes: 19 additions & 0 deletions models/focal.py
@@ -0,0 +1,19 @@
import torch
import torch.nn as nn


# Support: ['FocalLoss']


class FocalLoss(nn.Module):
    def __init__(self, gamma=2, eps=1e-7):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.eps = eps
        self.ce = nn.CrossEntropyLoss(reduction='none')

    def forward(self, input, target):
        # Focal loss: (1 - p)^gamma * CE, down-weighting easy examples.
        logp = self.ce(input, target)
        p = torch.exp(-logp)
        loss = (1 - p) ** self.gamma * logp
        return loss.mean()
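A quick smoke test for FocalLoss; the shapes are illustrative. With gamma=0 the weight (1 - p)^gamma becomes 1 and the loss reduces to plain cross-entropy:

```python
import torch

criterion = FocalLoss(gamma=2)
logits = torch.randn(8, 10)           # batch of 8 samples, 10 classes
labels = torch.randint(0, 10, (8,))
loss = criterion(logits, labels)      # scalar mean focal loss
print(loss.item())
```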
50 changes: 50 additions & 0 deletions models/metrics.py
@@ -0,0 +1,50 @@
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
import math

class TeacherLoss(nn.Module):
    def __init__(self):
        super(TeacherLoss, self).__init__()
        self.mse = nn.MSELoss(reduction='none')

    def forward(self, confidence, feature, teacher_feature, gaussian=False):
        # Per-element MSE between normalized student and teacher embeddings,
        # optionally weighted by per-sample confidence.
        loss = self.mse(F.normalize(feature), F.normalize(teacher_feature))
        if gaussian:
            loss = loss * confidence
        loss = loss.sum() / feature.size(0)
        return loss

class GaussianFace(nn.Module):
    def __init__(self, in_features, out_features, s=64, m=0.5):
        super(GaussianFace, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.s = s
        self.weight = Parameter(torch.FloatTensor(out_features, in_features))
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.th = math.cos(math.pi - m)
        self.mm = math.sin(math.pi - m) * m

    def forward(self, confidence, input, label, gaussian=True):
        # ArcFace-style additive angular margin on the target-class logits.
        weight = F.normalize(self.weight)
        cosine = F.linear(F.normalize(input), weight)
        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
        phi = cosine * self.cos_m - sine * self.sin_m
        phi = phi.type_as(cosine)  # keep dtypes consistent under mixed precision
        phi = torch.where(cosine > self.th, phi, cosine - self.mm)
        one_hot = torch.zeros_like(cosine)
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        output = torch.where(one_hot == 0, cosine, phi)
        if gaussian:
            # Rescale confidence from [0.2, 1.0] to roughly [0, 0.96] so
            # low-quality samples produce smaller logits.
            confidence = torch.clamp(confidence - 0.2, 0, 1) * 1.2
            output = output * self.s * confidence
        else:
            output = output * self.s
        return output
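A hedged example of TeacherLoss: with gaussian=True the per-element error between the normalized student and teacher embeddings is weighted by a per-sample confidence column. The shapes below are assumptions consistent with the broadcasting in forward:

```python
import torch

criterion = TeacherLoss()
student = torch.randn(4, 512)
teacher = torch.randn(4, 512)
confidence = torch.rand(4, 1)   # per-sample confidence in [0, 1]
loss = criterion(confidence, student, teacher, gaussian=True)
print(loss.item())
```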
118 changes: 118 additions & 0 deletions models/model_resnet.py
@@ -0,0 +1,118 @@
import torch
from torch import nn

class BottleNeck_IR(nn.Module):
    def __init__(self, in_channel, out_channel, stride, dim_match):
        super(BottleNeck_IR, self).__init__()
        self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel),
                                       nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False),
                                       nn.BatchNorm2d(out_channel),
                                       nn.PReLU(out_channel),
                                       nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False),
                                       nn.BatchNorm2d(out_channel))
        if dim_match:
            self.shortcut_layer = None
        else:
            self.shortcut_layer = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False),
                nn.BatchNorm2d(out_channel)
            )

    def forward(self, x):
        shortcut = x
        res = self.res_layer(x)

        if self.shortcut_layer is not None:
            shortcut = self.shortcut_layer(x)

        return shortcut + res

channel_list = [64, 64, 128, 256, 512]
def get_layers(num_layers):
    if num_layers == 34:
        return [3, 4, 6, 3]
    if num_layers == 50:
        return [3, 4, 14, 3]
    elif num_layers == 100:
        return [3, 13, 30, 3]
    elif num_layers == 152:
        return [3, 8, 36, 3]

class ResNet(nn.Module):
    def __init__(self, num_layers=100, feature_dim=512, drop_ratio=0.4, channel_list=channel_list):
        super(ResNet, self).__init__()
        assert num_layers in [34, 50, 100, 152]
        layers = get_layers(num_layers)
        block = BottleNeck_IR

        self.input_layer = nn.Sequential(nn.Conv2d(3, channel_list[0], (3, 3), stride=1, padding=1, bias=False),
                                         nn.BatchNorm2d(channel_list[0]),
                                         nn.PReLU(channel_list[0]))
        self.layer1 = self._make_layer(block, channel_list[0], channel_list[1], layers[0], stride=2)
        self.layer2 = self._make_layer(block, channel_list[1], channel_list[2], layers[1], stride=2)
        self.layer3 = self._make_layer(block, channel_list[2], channel_list[3], layers[2], stride=2)
        self.layer4 = self._make_layer(block, channel_list[3], channel_list[4], layers[3], stride=2)

        self.output_layer = nn.Sequential(nn.BatchNorm2d(512),
                                          nn.Dropout(drop_ratio),
                                          nn.Flatten())
        self.feature_layer = nn.Sequential(nn.Linear(512 * 7 * 7, feature_dim),
                                           nn.BatchNorm1d(feature_dim))

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, in_channel, out_channel, blocks, stride):
        layers = []
        layers.append(block(in_channel, out_channel, stride, False))
        for i in range(1, blocks):
            layers.append(block(out_channel, out_channel, 1, True))
        return nn.Sequential(*layers)

    def forward(self, x, fc=False):
        x = self.input_layer(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.output_layer(x)        # flattened 512 * 7 * 7 activations
        feature = self.feature_layer(x)
        if fc:
            return feature, x
        return feature

class FaceQuality(nn.Module):
    def __init__(self, feature_dim):
        super(FaceQuality, self).__init__()
        self.quality = nn.Sequential(
            nn.Linear(feature_dim, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 2, bias=False),
            nn.Softmax(dim=1)
        )
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.quality(x)
        return x[:, 0:1]

if __name__ == '__main__':
    x = torch.randn(2, 3, 112, 112)
    net = ResNet(num_layers=50)

    out = net(x)
    print(out.shape)
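A hedged sketch of how these modules might be wired together in one training step. The training scripts themselves are not part of this commit, so the flow, in particular feeding the flattened 512*7*7 activations from ResNet(..., fc=True) into FaceQuality, is an assumption:

```python
import torch
from models.focal import FocalLoss
from models.metrics import GaussianFace
from models.model_resnet import ResNet, FaceQuality

num_classes = 10                             # placeholder identity count
backbone = ResNet(num_layers=100, feature_dim=512)
quality = FaceQuality(512 * 7 * 7)
head = GaussianFace(in_features=512, out_features=num_classes)
criterion = FocalLoss()

images = torch.randn(4, 3, 112, 112)
labels = torch.randint(0, num_classes, (4,))

feature, fc = backbone(images, fc=True)      # (4, 512) and (4, 512*7*7)
confidence = quality(fc)                     # (4, 1) quality scores
logits = head(confidence, feature, labels)   # confidence-scaled margin logits
loss = criterion(logits, labels)
loss.backward()
```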