英招

牢骚太盛防肠断,风物长宜放眼量

0%

GNN(by pytorch)

前期准备

下载anaconda、pytorch、pyg,pyg安装参考11. 1-PyTorch Geometric工具包安装与配置方法_哔哩哔哩_bilibili

Pytorch+PyG实现GAT过程示例

GAT分类

构建包含10个节点和10条有向边(环形)的图,通过GAT对节点进行分类。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv

# 定义GAT模型
class GAT(torch.nn.Module):
    """Two-layer Graph Attention Network for 2-class node classification.

    Layer 1: 10 input features -> 16 channels x 8 attention heads
             (head outputs are concatenated to 16*8 = 128 features).
    Layer 2: 128 features -> 2 class logits, single head.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = GATConv(10, 16, heads=8)
        self.conv2 = GATConv(16 * 8, 2, heads=1)

    def forward(self, x, edge_index):
        # Dropout on inputs, then attention layer + ELU, dropout again,
        # and a final attention layer producing per-node class logits.
        h = F.dropout(x, p=0.5, training=self.training)
        h = F.elu(self.conv1(h, edge_index))
        h = F.dropout(h, p=0.5, training=self.training)
        h = self.conv2(h, edge_index)
        # Log-probabilities over the 2 classes, one row per node.
        return F.log_softmax(h, dim=1)

# Build a toy graph: 10 nodes with 10 random features each, connected in a
# directed ring (node i -> node (i+1) % 10).
x = torch.randn(10, 10)
edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                           [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]], dtype=torch.long)

# Model and optimizer.
model = GAT()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Train for 100 epochs: alternating 0/1 labels, NLL loss against the
# model's log-softmax output.
labels = torch.tensor([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])
model.train()
for epoch in range(100):
    optimizer.zero_grad()
    out = model(x, edge_index)
    loss = F.nll_loss(out, labels)
    loss.backward()
    optimizer.step()

GAT回归

构建包含10个节点和10条有向边(环形)的图,通过GAT对节点进行回归。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv

# 定义GAT模型
class GAT(torch.nn.Module):
    """Two-layer Graph Attention Network for scalar node regression.

    Layer 1: 10 input features -> 16 channels x 8 attention heads
             (head outputs are concatenated to 16*8 = 128 features).
    Layer 2: 128 features -> 1 regression value per node.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = GATConv(10, 16, heads=8)
        self.conv2 = GATConv(16 * 8, 1, heads=1)

    def forward(self, x, edge_index):
        h = F.dropout(x, p=0.5, training=self.training)
        h = F.elu(self.conv1(h, edge_index))
        h = F.dropout(h, p=0.5, training=self.training)
        # Raw (unactivated) prediction, shape (num_nodes, 1).
        return self.conv2(h, edge_index)

# Toy graph: 10 nodes, 10 random features each, directed ring i -> (i+1) % 10.
x = torch.randn(10, 10)
edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                           [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]], dtype=torch.long)
# Random regression target, one scalar per node.
y = torch.randn(10, 1)

# Model and optimizer.
model = GAT()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Train for 100 epochs with mean-squared error against the random targets.
model.train()
for epoch in range(100):
    optimizer.zero_grad()
    out = model(x, edge_index)
    loss = F.mse_loss(out, y)
    loss.backward()
    optimizer.step()