LCDSH.py
from utils.tools import *
from network import *

import os
import torch
import torch.optim as optim
import time
import numpy as np

torch.multiprocessing.set_sharing_strategy('file_system')


# LCDSH (IJCAI 2017)
# paper [Locality-Constrained Deep Supervised Hashing for Image Retrieval](https://www.ijcai.org/Proceedings/2017/0499.pdf)
# [LCDSH] epoch:145, bit:48, dataset:cifar10-1, MAP:0.798, Best MAP: 0.798
# [LCDSH] epoch:183, bit:48, dataset:nuswide_21, MAP:0.833, Best MAP: 0.834
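#
# Objective implemented by LCDSHLoss below (a sketch in the code's notation, not the paper's exact symbols):
#   s_ij = +1 if samples i and j share a label, else -1
#   L1   = mean_ij log(1 + exp(-s_ij * <u_i, u_j> / 2))                    (pairwise likelihood term)
#   L2   = mean_ij (sigmoid(<u_i, u_j> / 2) - sigmoid(<b_i, b_j> / 2))^2,  with b = sign(u)
#   loss = L1 + lambda * L2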
def get_config():
    config = {
        "lambda": 3,
        # "optimizer": {"type": optim.Adam, "optim_params": {"lr": 1e-5, "betas": (0.9, 0.999)}},
        "optimizer": {"type": optim.RMSprop, "optim_params": {"lr": 1e-5, "weight_decay": 10 ** -5}},
        "info": "[LCDSH]",
        "resize_size": 256,
        "crop_size": 224,
        "batch_size": 128,
        "net": AlexNet,
        # "net": ResNet,
        "dataset": "cifar10-1",
        # "dataset": "nuswide_21",
        "epoch": 350,
        "test_map": 5,
        # "device": torch.device("cpu"),
        "device": torch.device("cuda:1"),
        "bit_list": [48],
    }
    # config_dataset (from utils.tools) fills in dataset-specific settings
    # (e.g. data paths and retrieval parameters) based on config["dataset"].
    config = config_dataset(config)
    return config


class LCDSHLoss(torch.nn.Module):
    def __init__(self, config, bit):
        super(LCDSHLoss, self).__init__()

    def forward(self, u, y, ind, config):
        # Pairwise similarity: +1 if the two samples share at least one label, -1 otherwise.
        s = 2 * (y @ y.t() > 0).float() - 1
        inner_product = u @ u.t() * 0.5

        # L1: pairwise logistic (negative log-likelihood) loss on the continuous codes,
        # clamped for numerical stability.
        inner_product = inner_product.clamp(min=-50, max=50)
        L1 = torch.log(1 + torch.exp(-s * inner_product)).mean()

        # L2: locality constraint, pulling the continuous inner products toward
        # those of the binarized codes b = sign(u).
        b = u.sign()
        inner_product_ = b @ b.t() * 0.5
        L2 = (inner_product.sigmoid() - inner_product_.sigmoid()).pow(2).mean()

        return L1 + config["lambda"] * L2
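
# Minimal usage sketch for LCDSHLoss (illustrative only; the batch size, bit length and
# class count below are arbitrary assumptions, not values taken from this config):
#   cfg = {"lambda": 3}
#   u = torch.randn(8, 48)                          # continuous codes from the network
#   y = torch.eye(10)[torch.randint(0, 10, (8,))]   # one-hot labels
#   loss = LCDSHLoss(cfg, 48)(u, y, None, cfg)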


def train_val(config, bit):
    device = config["device"]
    train_loader, test_loader, dataset_loader, num_train, num_test, num_dataset = get_data(config)
    config["num_train"] = num_train
    net = config["net"](bit).to(device)

    optimizer = config["optimizer"]["type"](net.parameters(), **(config["optimizer"]["optim_params"]))

    criterion = LCDSHLoss(config, bit)

    Best_mAP = 0

    for epoch in range(config["epoch"]):
        current_time = time.strftime('%H:%M:%S', time.localtime(time.time()))
        print("%s[%2d/%2d][%s] bit:%d, dataset:%s, training...." % (
            config["info"], epoch + 1, config["epoch"], current_time, bit, config["dataset"]), end="")
        net.train()

        train_loss = 0
        for image, label, ind in train_loader:
            image = image.to(device)
            label = label.to(device)

            optimizer.zero_grad()
            u = net(image)

            loss = criterion(u, label.float(), ind, config)
            train_loss += loss.item()

            loss.backward()
            optimizer.step()

        train_loss = train_loss / len(train_loader)

        print("\b\b\b\b\b\b\b loss:%.3f" % (train_loss))

        # Evaluate mAP every config["test_map"] epochs; validate() returns the updated best mAP.
        if (epoch + 1) % config["test_map"] == 0:
            Best_mAP = validate(config, Best_mAP, test_loader, dataset_loader, net, bit, epoch, num_dataset)


if __name__ == "__main__":
    config = get_config()
    print(config)
    for bit in config["bit_list"]:
        train_val(config, bit)