import GPy
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import regdata as rd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
x_train, y_train, x_test = rd.Step().get_data()
y_train = y_train.reshape(-1, 1)
x_test = x_test * 1.5
print(x_train.shape, y_train.shape, x_test.shape)
plt.scatter(x_train, y_train, label='train');
(50, 1) (50, 1) (100, 1)
kernel = GPy.kern.RBF(1, variance=1, lengthscale=1)
model = GPy.models.GPRegression(x_train, y_train.reshape(-1, 1), kernel)
model.Gaussian_noise.variance = 0.1
y_pred_gp, y_var = model.predict(x_test)
plt.scatter(x_train, y_train, label='train');
plt.plot(x_test, y_pred_gp, label='pred');
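# The predictive variance y_var returned by model.predict above is otherwise unused.
# A minimal sketch (illustrative, not part of the original comparison) of a ~95%
# credible band around the GP mean, assuming y_var is the per-point predictive variance:
plt.fill_between(x_test.ravel(),
                 (y_pred_gp - 2 * np.sqrt(y_var)).ravel(),
                 (y_pred_gp + 2 * np.sqrt(y_var)).ravel(),
                 alpha=0.2, label='GP ±2 std');
plt.legend()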
class GCN_Forward(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.fc = nn.Linear(in_features, out_features)

    def forward(self, x, A):
        x = self.fc(x)
        x = torch.matmul(A, x)
        return x
class GCN_Reverse(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.fc = nn.Linear(in_features, out_features)

    def forward(self, x, A):
        x = torch.matmul(A, x)
        x = self.fc(x)
        return x
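# Side note (illustrative only): without a bias term the two orderings coincide,
# since A @ (X @ W) == (A @ X) @ W by associativity; with nn.Linear's bias they
# differ slightly, because GCN_Forward also propagates the bias through A.
# A quick check of the associativity claim with random tensors:
_A = torch.randn(5, 5)
_X = torch.randn(5, 3)
_W = torch.randn(3, 4)
print(torch.allclose(_A @ (_X @ _W), (_A @ _X) @ _W, atol=1e-5))  # True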
class NN(nn.Module):
    def __init__(self, features):
        super().__init__()
        self.features = features
        for i, (in_features, out_features) in enumerate(zip(features[:-1], features[1:])):
            setattr(self, f'layer_{i}', nn.Linear(in_features, out_features))
        self.last_layer = nn.Linear(features[-1], 1)

    def forward(self, x, A):
        for i in range(len(self.features) - 1):
            if isinstance(getattr(self, f'layer_{i}'), GCN_Forward):
                x = getattr(self, f'layer_{i}')(x, A)
            else:
                x = getattr(self, f'layer_{i}')(x)
            x = nn.functional.gelu(x)

        x = self.last_layer(x)
        return x
class GCN(NN):
    def __init__(self, features):
        super().__init__(features)
        for i, (in_features, out_features) in enumerate(zip(features[:-1], features[1:])):
            setattr(self, f'layer_{i}', GCN_Forward(in_features, out_features))
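# Quick smoke test (illustrative): GCN reuses NN.forward but swaps each hidden layer
# for GCN_Forward, so it maps an (N, d) input plus an (N, N) adjacency to (N, 1).
_x = torch.randn(5, 1)
_A_toy = torch.eye(5)
print(GCN(features=[1, 8])(_x, _A_toy).shape)  # torch.Size([5, 1])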
A = torch.tensor(kernel.K(x_train, x_train)).float()
# A.fill_diagonal_(0)
A = A / A.sum(dim=0, keepdim=True)
# A.fill_diagonal_(1)
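# The normalization above makes each column of A sum to 1 (the RBF kernel matrix is
# symmetric, so this divides by a per-node degree). An alternative, not used here, is
# the symmetric normalization D^{-1/2} K D^{-1/2} common in GCNs (Kipf & Welling).
# A minimal sketch, starting again from the raw kernel matrix:
K_raw = torch.tensor(kernel.K(x_train, x_train)).float()
D_inv_sqrt = torch.diag(K_raw.sum(dim=1).pow(-0.5))
A_sym = D_inv_sqrt @ K_raw @ D_inv_sqrt  # not used below; A stays column-normalized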
num_epochs = 500
features = [1, 1024]

gcn_model = GCN(features=features)
nn_model = NN(features=features)
gcn_optimizer = torch.optim.Adam(gcn_model.parameters(), lr=0.01)
nn_optimizer = torch.optim.Adam(nn_model.parameters(), lr=0.01)

criterion = nn.MSELoss()
x_train_torch = torch.from_numpy(x_train).float()
y_train_torch = torch.from_numpy(y_train).float()
gcn_losses = []
nn_losses = []
for epoch in range(num_epochs):
    gcn_optimizer.zero_grad()
    nn_optimizer.zero_grad()
    y_out_gcn = gcn_model(x_train_torch, A)
    y_out_nn = nn_model(x_train_torch, A)
    gcn_loss = criterion(y_out_gcn, y_train_torch)
    nn_loss = criterion(y_out_nn, y_train_torch)

    gcn_loss.backward()
    nn_loss.backward()
    gcn_losses.append(gcn_loss.item())
    nn_losses.append(nn_loss.item())
    gcn_optimizer.step()
    nn_optimizer.step()
plt.plot(gcn_losses, label='gcn');
plt.plot(nn_losses, label='nn'); plt.legend()
A_test = torch.tensor(kernel.K(x_test, x_test)).float()
# A_test.fill_diagonal_(0)
A_test = A_test / A_test.sum(dim=0, keepdim=True)
# A_test.fill_diagonal_(1)
y_pred_nn = nn_model(torch.from_numpy(x_test).float(), A_test).detach().numpy()
y_pred_gcn = gcn_model(torch.from_numpy(x_test).float(), A_test).detach().numpy()
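# Minor variant (optional, equivalent result): run inference under torch.no_grad()
# so no autograd graph is built, instead of detaching afterwards.
with torch.no_grad():
    y_pred_gcn_ng = gcn_model(torch.from_numpy(x_test).float(), A_test).numpy()
print(np.allclose(y_pred_gcn, y_pred_gcn_ng))  # True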
plt.figure(figsize=(10, 6))
plt.scatter(x_train, y_train, label='train');
plt.plot(x_train, y_out_gcn.detach().numpy(), label='pred GCN train');
plt.plot(x_train, y_out_nn.detach().numpy(), label='pred NN train');
plt.plot(x_test, y_pred_gp, label='pred GP', linestyle='--');
plt.plot(x_test, y_pred_nn, label='pred NN');
plt.plot(x_test, y_pred_gcn, label='pred GCN');
plt.ylim(-3, 3); plt.legend()