Commit: Attack changes
Munkhtenger19 committed May 16, 2024
1 parent dec1fa1 commit e9b1942
Showing 41 changed files with 1,019 additions and 486 deletions.
157 changes: 131 additions & 26 deletions PyG_to_sci_sparse.py
@@ -179,24 +179,26 @@ def prep_graph(dataset,
num_nodes = data.__num_nodes__
else:
num_nodes = data.num_nodes

if hasattr(dataset, 'get_idx_split'):
split = dataset.get_idx_split()
else:
split = dict(
train=data.train_mask.nonzero().squeeze(),
valid=data.val_mask.nonzero().squeeze(),
test=data.test_mask.nonzero().squeeze()
)
# if hasattr(dataset, 'get_idx_split'):
# split = dataset.get_idx_split()
# else:
# split = dict(
# train=data.train_mask.nonzero().squeeze(),
# valid=data.val_mask.nonzero().squeeze(),
# test=data.test_mask.nonzero().squeeze()
# )

    # converting to numpy arrays, so we don't have to handle different
    # array types (tensor/numpy/list) later on.
    # Also we need numpy arrays because Numba can't determine the type of torch.Tensor
split = {k: v.numpy() for k, v in split.items()}
# split = {k: v.numpy() for k, v in split.items()}

edge_index = data.edge_index.cpu()
print('edge_index', edge_index)
if data.edge_attr is None:
edge_weight = torch.ones(edge_index.size(1))
# edge_weight = torch.ones(edge_index.size(1))
edge_weight = torch.full((edge_index.size(1),), 2.0)
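        # assumption: the constant 2.0 (instead of torch.ones) looks like temporary
        # instrumentation, making edge-weight propagation visible in the prints below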
else:
edge_weight = data.edge_attr
edge_weight = edge_weight.cpu()
@@ -223,7 +225,7 @@ def prep_graph(dataset,
adj = torch_sparse.SparseTensor.from_scipy(adj).coalesce().to(device)

attr_matrix = data.x.cpu().numpy()
print(adj)
print('prep_graph adj', adj)
attr = torch.from_numpy(attr_matrix).to(device)

logging.debug("Memory Usage after normalizing graph attributes:")
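As context for the conversion above, a minimal round-trip sketch for torch_sparse.SparseTensor.from_scipy; the toy graph is an assumption, purely illustrative:

import torch
import scipy.sparse as sp
import torch_sparse

# toy 3-node adjacency in CSR form, standing in for the normalized graph
A = sp.csr_matrix(([1.0, 1.0, 1.0], ([0, 1, 2], [1, 2, 0])), shape=(3, 3))
adj_demo = torch_sparse.SparseTensor.from_scipy(A).coalesce()
row, col, val = adj_demo.coo()            # back to COO triplets
print(torch.stack([row, col], dim=0))     # the edge_index this adjacency encodes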
@@ -250,6 +252,7 @@ def to_symmetric_scipy(adjacency: sp.csr_matrix):
from torch_geometric.nn import GCN
from torch_geometric.datasets import Amazon
from torch_geometric.datasets import QM9, CoraFull, Planetoid
from ogb.nodeproppred import PygNodePropPredDataset
import inspect
from gnn_toolbox.common import is_directed
coraa = CoraFull(root='./datasets', transform=T.ToSparseTensor())
@@ -270,18 +273,22 @@ def to_symmetric_scipy(adjacency: sp.csr_matrix):

# dataset2 = Planetoid(name = 'cora', root = './datasets')
# dataset5 = QM9(root='./datasets', transform=T.Compose([T.ToUndirected(), T.ToSparseTensor()]))
# dataset6 = QM9(root='./datasets')
# dataset6 = PygNodePropPredDataset(name='ogbn-arxiv', root='./datasets')
# data6= dataset6[0]
# print('qm9', data6.edge_attr)

# print('qm9', data6)
# print('qm9.edge_attr', data6.edge_attr)
# from ogb.nodeproppred import PygNodePropPredDataset
# dataset3 = PygNodePropPredDataset(name='ogbn-arxiv', root='./datasets')
# dataset4 = PygNodePropPredDataset(name='ogbn-arxiv', root='./datasets', transform=T.ToUndirected())
# data = dataset[0]
# # print('pyg', data)
# data3 = dataset3[0]
# print('is_directed ogb', data3.is_directed())
# attr, adj, labels, splits, n = prep_graph(dataset4, make_undirected=False)

# attr, adj, labels, splits, n = prep_graph(dataset6, make_undirected=False)
# row, col, edge_attr2 = adj.coo()
# edge_index = torch.stack([row, col], dim=0)
# print('after edge_attr', edge_attr2)
# print('pyg prep_graph adj', adj)
# attr, adj, labels, splits, n = prep_graph(dataset3, make_undirected=True)
# print('robu prep_graph adj', adj)
@@ -295,20 +302,119 @@ def to_symmetric_scipy(adjacency: sp.csr_matrix):
# # model = DICE()
# amazon = Amazon(root='./datasets', name='computers')
# data9 = amazon[0]
def accuracy(pred, y, mask):
return (pred.argmax(-1)[mask] == y[mask]).float().mean()
# print(data9.is_directed())
# dataset1= Planetoid(name = 'cora', root = './datasets', transform=T.ToUndirected())
# data = dataset1[0]
# print('data.edge_index', data.edge_index)
dataset2 = PygNodePropPredDataset(name='ogbn-arxiv', root='./datasets', transform=T.ToSparseTensor(remove_edge_index=False))
dataset1 = Planetoid(name='cora', root='./datasets')
data = dataset1[0]
train = data.train_mask.nonzero().squeeze()
model1 = GCN(in_channels=dataset1.num_features, out_channels=dataset1.num_classes, hidden_channels=32, num_layers=2)
logits = model1(data.x, data.edge_index)
# print(accuracy(logits, data.y, train))
# print(accuracy(logits, data.y, data.train_mask))
# from torch_geometric.datasets import PPI
# dataset1 = PPI(root='./datasets', transform=T.ToSparseTensor(remove_edge_index=False))
# print('pygnodepred', dataset1)
# we = dataset1[0]
# print('we', we)

# dataset2= Planetoid(name = 'citeseer', root = './datasets')
# dataset2= QM9(root = './datasets')
data2 = dataset2[0]
# train_mask=data2.train_mask.nonzero().squeeze()
# attr, adj, labels, splits, n = prep_graph(dataset2, make_undirected=False)
# print()
from torch_geometric.utils import to_undirected, is_undirected
print('is_undirected', is_undirected(data2.edge_index, num_nodes=data2.num_nodes))
adj = SparseTensor(row=data2.edge_index[0], col=data2.edge_index[1], value=data2.edge_attr).to('cuda')
adj2 = SparseTensor(row=data2.edge_index[0], col=data2.edge_index[1], value=data2.edge_weight if data2.edge_weight is not None else torch.ones(data2.edge_index.size(1))).to('cuda')
print('num_edge',adj.nnz())
print('what', adj)
print('what2', adj.t())

row, col, edge_attr = adj.t().coo()
edge_index = torch.stack([row, col], dim=0)
print('edge_index3', edge_index)

# row, col, edge_attr = adj.coo()
# edge_index = torch.stack([row, col], dim=0)
# print('edge_index1', edge_index)

# print('data2.edge_index', data2.adj_t)

# row, col, edge_attr = adj_t.t().coo()
# edge_index = torch.stack([row, col], dim=0)
edge_index = to_undirected(data2.edge_index)
print('x',edge_index.shape[1])
print('undirected', edge_index)
print(torch.equal(data2.edge_index, edge_index))
print("numm", data2.num_nodes)

print("numm2", data2.x.shape[0])
print("numm2", data2.x.size(0))
# row, col, edge_attr = adj.coo()
# cora_edge_index = torch.stack([row, col], dim=0)
# print('edge_attr', edge_attr)
# data = dataset1[0]
# row, col, edge_attr = data.adj_t.coo()
# cora_edge_index = torch.stack([row, col], dim=0)

# # cora_edge_index = cora_edge_index.to('cpu')
# if edge_attr is None:
# edge_attr = torch.ones(cora_edge_index.size(1))
# row2, col2, edge_attr2 = adj_t.coo()
# edge_index2 = torch.stack([row2, col2], dim=0)
# print('data2 edge_index', edge_index2)
# print('are they equal:', torch.equal(edge_index, edge_index2))
# model1= GCN(in_channels=dataset.num_features, out_channels=dataset.num_classes, hidden_channels=16, num_layers=2, dropout=0.5)

import torch.nn as nn
from torch_geometric.nn import GCNConv

def accuracy(pred, y, mask):
return (pred.argmax(-1)[mask] == y[mask]).float().mean()

# @register_model("GCN")
class GCNWH(nn.Module):
def __init__(self, in_channels: int, out_channels: int, hidden_channels: int, num_layers: int = 2, dropout: float = 0.5, **kwargs):
super().__init__()
self.GCNConv1 = GCNConv(in_channels=in_channels, out_channels=hidden_channels)
self.layers = nn.ModuleList([GCNConv(hidden_channels, hidden_channels) for _ in range(num_layers)])
self.dropout = nn.Dropout(dropout)
self.GCNConv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels)

    def forward(self, x, edge_index, edge_weight=None, **kwargs):
        x = self.GCNConv1(x, edge_index, edge_weight).relu()
        x = self.dropout(x)  # use the configured dropout rate instead of F.dropout's default
        # note: the hidden convs in self.layers are defined but not applied here
        x = self.GCNConv2(x, edge_index, edge_weight)
        return x
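
A minimal usage sketch for GCNWH, reusing dataset1 and data from above; the hidden size and passing None as edge_weight are assumptions:

model_wh = GCNWH(in_channels=dataset1.num_features, out_channels=dataset1.num_classes, hidden_channels=32)
out = model_wh(data.x, data.edge_index, None)
print(out.shape)  # torch.Size([2708, 7]) on Cora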

def train3(model, attr, edge_index, data, label, edge_weight=None, epochs=200, lr=0.01, weight_decay=5e-4):
model = model.to('cuda')
res = []
model.train()
optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    for module in model.children():
        if hasattr(module, 'reset_parameters'):  # ModuleList/Dropout children have no reset_parameters
            module.reset_parameters()
for _ in range(epochs):
optimizer.zero_grad()
pred = model(attr, edge_index, edge_weight)
loss = F.cross_entropy(pred[data.train_mask], label[data.train_mask])
acc = float(accuracy(pred, label, data.train_mask))
res.append(acc)
loss.backward()
optimizer.step()

with torch.no_grad():
model.eval()
pred = model(attr, edge_index, edge_weight)
acc = float(accuracy(pred, label, data.train_mask))
print('model2 acc', acc)
return res
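
A hedged sketch of calling train3 with the Cora objects defined above; train3 moves only the model to CUDA, so moving the data tensors first is an assumption about the intended use:

dev = 'cuda'
d = data.to(dev)                   # move x, y, and masks alongside the model
res = train3(model1, d.x, d.edge_index, d, d.y)
print(res[-1])                     # training accuracy after the final epoch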

# print(train3(model1, data.x, cora_edge_index, edge_attr, data, data.y))
# from pprint import pprint
# res =train3(model1,attr, cora_edge_index, data2, labels, edge_attr)
# pprint(res)

# train2(model1,)
# model2 = GCN(in_channels=dataset.num_features, out_channels=dataset.num_classes, hidden_channels=16, num_layers=2, dropout=0.5)

def train(model, data, epochs=200, lr=0.01, weight_decay=5e-4):
@@ -321,8 +427,7 @@ def train(model, data, epochs=200, lr=0.01, weight_decay=5e-4):
loss.backward()
optimizer.step()

def accuracy(pred, y, mask):
return (pred.argmax(-1)[mask] == y[mask]).float().mean()


@torch.no_grad()
def test(model, data):
@@ -340,7 +445,7 @@ def train2(model, attr, adj, split, label, epochs=200, lr=0.01, weight_decay=5e-
optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
for _ in range(epochs):
optimizer.zero_grad()
pred = model(attr, adj.t())
pred = model(attr, adj)
loss = F.cross_entropy(pred[split['train']], label[split['train']])
loss.backward()
optimizer.step()
28 changes: 15 additions & 13 deletions configs/default_experiment.yaml
@@ -1,20 +1,21 @@
output_dir: ./output2
output_dir: ./output6
# resume_output: False
# csv_save: True
# esd: True
experiment_templates:
- name: My experiment
seed: [0]
device: cuda
model:
- name: GCN
params:
hidden_channels: 32
hidden_channels: 64
dataset:
name: Cora
name: Cora #ogbn-arxiv
root: ./datasets
make_undirected: True
transforms:
- name: NormalizeFeatures
make_undirected: False
# transforms:
# - name: NormalizeFeatures
# - name: Constant
# params:
# value: 0.8
@@ -36,20 +37,21 @@ experiment_templates:
# name: DICE, FGSM, PGD, PRBCD, GreedyRBCD]
# type: [poison, evasion]
# epsilon: [0.5]
- scope: global
name: [PRBCD, GreedyRBCD]
- scope: local
name: [LocalPRBCD, LocalDICE]
type: poison
epsilon: [0.5]
epsilon: [0.3]
nodes: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# - scope: local
# name: localDICE
# name: Nettack
# type: evasion
# epsilon: [0.2]
# nodes: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# nodes: [0]


training:
max_epochs: 300
patience: 100
max_epochs: 100
patience: 70

optimizer:
name: adam
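
For orientation, a minimal sketch of reading this template with PyYAML; only keys visible in this diff are accessed, and yaml.safe_load is standard PyYAML rather than a gnn_toolbox API:

import yaml

with open('configs/default_experiment.yaml') as f:
    cfg = yaml.safe_load(f)

template = cfg['experiment_templates'][0]
print(template['dataset'])    # {'name': 'Cora', 'root': './datasets', 'make_undirected': False}
print(template['training'])   # {'max_epochs': 100, 'patience': 70}, assuming optimizer is a sibling key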
Diffs for the remaining 39 changed files are not shown.
