diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/GNN_state_pred_het_dict new file mode 100644 index 000000000..e91a4e7e7 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/GNN_state_pred_het_full new file mode 100644 index 000000000..d8c075e88 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/train_res new file mode 100644 index 000000000..156a41272 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/32ch/50e/train_res @@ -0,0 +1,50 @@ +Epoch: 000, Train Loss: 0.059122, Test Loss: 0.059964 +Epoch: 001, Train Loss: 0.038855, Test Loss: 0.039430 +Epoch: 002, Train Loss: 0.029786, Test Loss: 0.030119 +Epoch: 003, Train Loss: 0.024270, Test Loss: 0.024509 +Epoch: 004, Train Loss: 0.020792, Test Loss: 0.021115 +Epoch: 005, Train Loss: 0.017929, Test Loss: 0.018149 +Epoch: 006, Train Loss: 0.016036, Test Loss: 0.016234 +Epoch: 007, Train Loss: 0.014971, Test Loss: 0.015116 +Epoch: 008, Train Loss: 0.013815, Test Loss: 0.013943 +Epoch: 009, Train Loss: 0.013231, Test Loss: 0.013315 +Epoch: 010, Train Loss: 0.012711, Test Loss: 0.012753 +Epoch: 011, Train Loss: 0.012124, Test Loss: 0.012133 +Epoch: 012, Train Loss: 0.011642, Test Loss: 0.011636 +Epoch: 013, Train Loss: 0.011475, Test Loss: 0.011482 +Epoch: 014, Train Loss: 0.011344, Test Loss: 0.011288 +Epoch: 015, Train Loss: 0.011121, Test Loss: 0.011025 +Epoch: 016, Train Loss: 0.010511, Test Loss: 0.010454 +Epoch: 017, Train Loss: 0.010048, Test Loss: 0.009998 +Epoch: 018, Train Loss: 0.010281, Test Loss: 0.010169 +Epoch: 019, Train Loss: 0.009415, Test Loss: 0.009375 +Epoch: 020, Train Loss: 0.009319, Test Loss: 0.009258 +Epoch: 021, Train Loss: 0.008570, Test Loss: 0.008602 +Epoch: 022, Train Loss: 0.008747, Test Loss: 0.008783 +Epoch: 023, Train Loss: 0.008152, Test Loss: 0.008256 +Epoch: 024, Train Loss: 0.007792, Test Loss: 0.007926 +Epoch: 025, Train Loss: 0.007487, Test Loss: 0.007645 +Epoch: 026, Train Loss: 0.007250, Test Loss: 0.007391 +Epoch: 027, Train Loss: 0.007021, Test Loss: 0.007208 +Epoch: 028, Train Loss: 0.006973, Test Loss: 0.007146 +Epoch: 029, Train Loss: 0.006621, Test Loss: 0.006818 +Epoch: 030, Train Loss: 0.006720, Test Loss: 0.006909 +Epoch: 031, Train Loss: 0.006614, Test Loss: 0.006824 +Epoch: 032, Train Loss: 0.006298, Test Loss: 0.006511 +Epoch: 033, Train Loss: 0.006266, Test Loss: 0.006468 +Epoch: 034, Train Loss: 0.006041, Test Loss: 0.006268 +Epoch: 035, Train Loss: 0.005902, Test Loss: 0.006126 +Epoch: 036, Train Loss: 0.005832, Test Loss: 0.006063 +Epoch: 037, Train Loss: 0.005818, Test Loss: 0.006040 +Epoch: 038, Train Loss: 0.005659, Test Loss: 0.005892 +Epoch: 039, Train Loss: 0.005553, Test Loss: 0.005800 +Epoch: 040, Train Loss: 0.005546, Test Loss: 0.005777 +Epoch: 041, Train Loss: 0.005445, Test Loss: 0.005681 +Epoch: 042, Train Loss: 0.005366, Test Loss: 0.005610 +Epoch: 043, Train Loss: 0.005320, Test Loss: 0.005567 +Epoch: 044, Train Loss: 0.005238, Test Loss: 
0.005477 +Epoch: 045, Train Loss: 0.005240, Test Loss: 0.005475 +Epoch: 046, Train Loss: 0.005135, Test Loss: 0.005381 +Epoch: 047, Train Loss: 0.005087, Test Loss: 0.005332 +Epoch: 048, Train Loss: 0.005119, Test Loss: 0.005340 +Epoch: 049, Train Loss: 0.005116, Test Loss: 0.005339 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/GNN_state_pred_het_dict new file mode 100644 index 000000000..c56f81e0f Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/GNN_state_pred_het_full new file mode 100644 index 000000000..d38283e28 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/train_res new file mode 100644 index 000000000..3aaea4b96 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/100e/train_res @@ -0,0 +1,100 @@ +Epoch: 000, Train Loss: 0.040080, Test Loss: 0.040366 +Epoch: 001, Train Loss: 0.020591, Test Loss: 0.020814 +Epoch: 002, Train Loss: 0.014610, Test Loss: 0.014798 +Epoch: 003, Train Loss: 0.013165, Test Loss: 0.013216 +Epoch: 004, Train Loss: 0.012829, Test Loss: 0.012804 +Epoch: 005, Train Loss: 0.011173, Test Loss: 0.011158 +Epoch: 006, Train Loss: 0.009944, Test Loss: 0.009942 +Epoch: 007, Train Loss: 0.010120, Test Loss: 0.010082 +Epoch: 008, Train Loss: 0.008894, Test Loss: 0.008864 +Epoch: 009, Train Loss: 0.008147, Test Loss: 0.008130 +Epoch: 010, Train Loss: 0.007288, Test Loss: 0.007290 +Epoch: 011, Train Loss: 0.006985, Test Loss: 0.006982 +Epoch: 012, Train Loss: 0.006598, Test Loss: 0.006611 +Epoch: 013, Train Loss: 0.006351, Test Loss: 0.006373 +Epoch: 014, Train Loss: 0.006065, Test Loss: 0.006075 +Epoch: 015, Train Loss: 0.006192, Test Loss: 0.006187 +Epoch: 016, Train Loss: 0.005840, Test Loss: 0.005839 +Epoch: 017, Train Loss: 0.005528, Test Loss: 0.005530 +Epoch: 018, Train Loss: 0.005721, Test Loss: 0.005723 +Epoch: 019, Train Loss: 0.005340, Test Loss: 0.005340 +Epoch: 020, Train Loss: 0.005146, Test Loss: 0.005162 +Epoch: 021, Train Loss: 0.005406, Test Loss: 0.005417 +Epoch: 022, Train Loss: 0.004950, Test Loss: 0.004966 +Epoch: 023, Train Loss: 0.005079, Test Loss: 0.005083 +Epoch: 024, Train Loss: 0.005161, Test Loss: 0.005192 +Epoch: 025, Train Loss: 0.004708, Test Loss: 0.004746 +Epoch: 026, Train Loss: 0.004645, Test Loss: 0.004679 +Epoch: 027, Train Loss: 0.004525, Test Loss: 0.004560 +Epoch: 028, Train Loss: 0.004408, Test Loss: 0.004456 +Epoch: 029, Train Loss: 0.004468, Test Loss: 0.004510 +Epoch: 030, Train Loss: 0.004402, Test Loss: 0.004468 +Epoch: 031, Train Loss: 0.004193, Test Loss: 0.004245 +Epoch: 032, Train Loss: 0.004118, Test Loss: 0.004159 +Epoch: 033, Train Loss: 0.003933, Test Loss: 0.003995 +Epoch: 034, Train Loss: 0.003687, Test Loss: 0.003753 +Epoch: 035, Train Loss: 0.003449, Test Loss: 0.003509 +Epoch: 036, Train Loss: 0.003361, Test Loss: 0.003421 +Epoch: 037, Train Loss: 0.003373, Test Loss: 0.003438 +Epoch: 038, Train Loss: 0.003399, Test Loss: 0.003460 +Epoch: 039, Train Loss: 
0.003387, Test Loss: 0.003437 +Epoch: 040, Train Loss: 0.003195, Test Loss: 0.003243 +Epoch: 041, Train Loss: 0.003161, Test Loss: 0.003208 +Epoch: 042, Train Loss: 0.003043, Test Loss: 0.003097 +Epoch: 043, Train Loss: 0.003178, Test Loss: 0.003233 +Epoch: 044, Train Loss: 0.002973, Test Loss: 0.003005 +Epoch: 045, Train Loss: 0.002876, Test Loss: 0.002935 +Epoch: 046, Train Loss: 0.002953, Test Loss: 0.002999 +Epoch: 047, Train Loss: 0.002940, Test Loss: 0.003003 +Epoch: 048, Train Loss: 0.003112, Test Loss: 0.003177 +Epoch: 049, Train Loss: 0.003024, Test Loss: 0.003087 +Epoch: 050, Train Loss: 0.002853, Test Loss: 0.002906 +Epoch: 051, Train Loss: 0.002791, Test Loss: 0.002840 +Epoch: 052, Train Loss: 0.002684, Test Loss: 0.002733 +Epoch: 053, Train Loss: 0.002741, Test Loss: 0.002794 +Epoch: 054, Train Loss: 0.002821, Test Loss: 0.002864 +Epoch: 055, Train Loss: 0.002796, Test Loss: 0.002838 +Epoch: 056, Train Loss: 0.002629, Test Loss: 0.002672 +Epoch: 057, Train Loss: 0.002585, Test Loss: 0.002619 +Epoch: 058, Train Loss: 0.002692, Test Loss: 0.002742 +Epoch: 059, Train Loss: 0.002519, Test Loss: 0.002566 +Epoch: 060, Train Loss: 0.002548, Test Loss: 0.002582 +Epoch: 061, Train Loss: 0.002486, Test Loss: 0.002524 +Epoch: 062, Train Loss: 0.002639, Test Loss: 0.002685 +Epoch: 063, Train Loss: 0.002484, Test Loss: 0.002514 +Epoch: 064, Train Loss: 0.002483, Test Loss: 0.002531 +Epoch: 065, Train Loss: 0.002434, Test Loss: 0.002469 +Epoch: 066, Train Loss: 0.002418, Test Loss: 0.002445 +Epoch: 067, Train Loss: 0.002229, Test Loss: 0.002274 +Epoch: 068, Train Loss: 0.002398, Test Loss: 0.002430 +Epoch: 069, Train Loss: 0.002341, Test Loss: 0.002373 +Epoch: 070, Train Loss: 0.002470, Test Loss: 0.002496 +Epoch: 071, Train Loss: 0.002318, Test Loss: 0.002347 +Epoch: 072, Train Loss: 0.002284, Test Loss: 0.002307 +Epoch: 073, Train Loss: 0.002353, Test Loss: 0.002380 +Epoch: 074, Train Loss: 0.002370, Test Loss: 0.002403 +Epoch: 075, Train Loss: 0.002214, Test Loss: 0.002249 +Epoch: 076, Train Loss: 0.002246, Test Loss: 0.002271 +Epoch: 077, Train Loss: 0.002182, Test Loss: 0.002214 +Epoch: 078, Train Loss: 0.002171, Test Loss: 0.002194 +Epoch: 079, Train Loss: 0.002115, Test Loss: 0.002154 +Epoch: 080, Train Loss: 0.002119, Test Loss: 0.002142 +Epoch: 081, Train Loss: 0.002104, Test Loss: 0.002140 +Epoch: 082, Train Loss: 0.002072, Test Loss: 0.002086 +Epoch: 083, Train Loss: 0.002116, Test Loss: 0.002141 +Epoch: 084, Train Loss: 0.001982, Test Loss: 0.002020 +Epoch: 085, Train Loss: 0.002053, Test Loss: 0.002058 +Epoch: 086, Train Loss: 0.001931, Test Loss: 0.001964 +Epoch: 087, Train Loss: 0.001907, Test Loss: 0.001934 +Epoch: 088, Train Loss: 0.002038, Test Loss: 0.002052 +Epoch: 089, Train Loss: 0.001925, Test Loss: 0.001938 +Epoch: 090, Train Loss: 0.001974, Test Loss: 0.001997 +Epoch: 091, Train Loss: 0.002047, Test Loss: 0.002051 +Epoch: 092, Train Loss: 0.002117, Test Loss: 0.002124 +Epoch: 093, Train Loss: 0.001960, Test Loss: 0.001964 +Epoch: 094, Train Loss: 0.001901, Test Loss: 0.001919 +Epoch: 095, Train Loss: 0.001925, Test Loss: 0.001931 +Epoch: 096, Train Loss: 0.001824, Test Loss: 0.001844 +Epoch: 097, Train Loss: 0.001893, Test Loss: 0.001903 +Epoch: 098, Train Loss: 0.001913, Test Loss: 0.001923 +Epoch: 099, Train Loss: 0.001843, Test Loss: 0.001849 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/GNN_state_pred_het_dict new file mode 100644 
index 000000000..36569031d Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/GNN_state_pred_het_full new file mode 100644 index 000000000..302af6f2d Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/train_res new file mode 100644 index 000000000..061a774f3 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/20e/train_res @@ -0,0 +1,20 @@ +Epoch: 000, Train Loss: 0.038977, Test Loss: 0.038516 +Epoch: 001, Train Loss: 0.021613, Test Loss: 0.021252 +Epoch: 002, Train Loss: 0.015219, Test Loss: 0.014979 +Epoch: 003, Train Loss: 0.012758, Test Loss: 0.012641 +Epoch: 004, Train Loss: 0.010339, Test Loss: 0.010257 +Epoch: 005, Train Loss: 0.009468, Test Loss: 0.009407 +Epoch: 006, Train Loss: 0.008761, Test Loss: 0.008682 +Epoch: 007, Train Loss: 0.008724, Test Loss: 0.008638 +Epoch: 008, Train Loss: 0.007264, Test Loss: 0.007202 +Epoch: 009, Train Loss: 0.006791, Test Loss: 0.006737 +Epoch: 010, Train Loss: 0.006485, Test Loss: 0.006446 +Epoch: 011, Train Loss: 0.005976, Test Loss: 0.005945 +Epoch: 012, Train Loss: 0.006247, Test Loss: 0.006212 +Epoch: 013, Train Loss: 0.005417, Test Loss: 0.005384 +Epoch: 014, Train Loss: 0.005269, Test Loss: 0.005255 +Epoch: 015, Train Loss: 0.005017, Test Loss: 0.005002 +Epoch: 016, Train Loss: 0.005051, Test Loss: 0.005055 +Epoch: 017, Train Loss: 0.004882, Test Loss: 0.004887 +Epoch: 018, Train Loss: 0.004844, Test Loss: 0.004865 +Epoch: 019, Train Loss: 0.004383, Test Loss: 0.004402 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/GNN_state_pred_het_dict new file mode 100644 index 000000000..859c238ca Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/GNN_state_pred_het_full new file mode 100644 index 000000000..596555abe Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/train_res new file mode 100644 index 000000000..ee7313f15 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/64ch/50e/train_res @@ -0,0 +1,50 @@ +Epoch: 000, Train Loss: 0.041597, Test Loss: 0.042314 +Epoch: 001, Train Loss: 0.022224, Test Loss: 0.022520 +Epoch: 002, Train Loss: 0.015402, Test Loss: 0.015499 +Epoch: 003, Train Loss: 0.012459, Test Loss: 0.012488 +Epoch: 004, Train Loss: 0.010423, Test Loss: 0.010482 +Epoch: 005, Train Loss: 0.009028, Test Loss: 0.009105 +Epoch: 006, Train Loss: 0.008365, Test Loss: 0.008432 +Epoch: 007, Train Loss: 0.007737, Test Loss: 0.007772 +Epoch: 008, Train Loss: 0.007703, Test Loss: 0.007679 +Epoch: 
009, Train Loss: 0.007362, Test Loss: 0.007345 +Epoch: 010, Train Loss: 0.007206, Test Loss: 0.007160 +Epoch: 011, Train Loss: 0.007105, Test Loss: 0.007049 +Epoch: 012, Train Loss: 0.006998, Test Loss: 0.006952 +Epoch: 013, Train Loss: 0.006941, Test Loss: 0.006890 +Epoch: 014, Train Loss: 0.006803, Test Loss: 0.006776 +Epoch: 015, Train Loss: 0.006417, Test Loss: 0.006379 +Epoch: 016, Train Loss: 0.006711, Test Loss: 0.006704 +Epoch: 017, Train Loss: 0.006518, Test Loss: 0.006511 +Epoch: 018, Train Loss: 0.005905, Test Loss: 0.005912 +Epoch: 019, Train Loss: 0.005999, Test Loss: 0.005999 +Epoch: 020, Train Loss: 0.005870, Test Loss: 0.005876 +Epoch: 021, Train Loss: 0.005153, Test Loss: 0.005179 +Epoch: 022, Train Loss: 0.004757, Test Loss: 0.004792 +Epoch: 023, Train Loss: 0.004716, Test Loss: 0.004747 +Epoch: 024, Train Loss: 0.004693, Test Loss: 0.004728 +Epoch: 025, Train Loss: 0.004347, Test Loss: 0.004368 +Epoch: 026, Train Loss: 0.004345, Test Loss: 0.004373 +Epoch: 027, Train Loss: 0.004342, Test Loss: 0.004363 +Epoch: 028, Train Loss: 0.004469, Test Loss: 0.004503 +Epoch: 029, Train Loss: 0.004789, Test Loss: 0.004808 +Epoch: 030, Train Loss: 0.004646, Test Loss: 0.004667 +Epoch: 031, Train Loss: 0.004922, Test Loss: 0.004936 +Epoch: 032, Train Loss: 0.005485, Test Loss: 0.005505 +Epoch: 033, Train Loss: 0.005273, Test Loss: 0.005267 +Epoch: 034, Train Loss: 0.004670, Test Loss: 0.004688 +Epoch: 035, Train Loss: 0.004818, Test Loss: 0.004826 +Epoch: 036, Train Loss: 0.004808, Test Loss: 0.004823 +Epoch: 037, Train Loss: 0.003915, Test Loss: 0.003935 +Epoch: 038, Train Loss: 0.003706, Test Loss: 0.003749 +Epoch: 039, Train Loss: 0.003967, Test Loss: 0.003998 +Epoch: 040, Train Loss: 0.003534, Test Loss: 0.003591 +Epoch: 041, Train Loss: 0.003419, Test Loss: 0.003499 +Epoch: 042, Train Loss: 0.003418, Test Loss: 0.003476 +Epoch: 043, Train Loss: 0.003506, Test Loss: 0.003560 +Epoch: 044, Train Loss: 0.003293, Test Loss: 0.003342 +Epoch: 045, Train Loss: 0.003199, Test Loss: 0.003255 +Epoch: 046, Train Loss: 0.003239, Test Loss: 0.003297 +Epoch: 047, Train Loss: 0.003402, Test Loss: 0.003442 +Epoch: 048, Train Loss: 0.003318, Test Loss: 0.003360 +Epoch: 049, Train Loss: 0.003193, Test Loss: 0.003257 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/model.py b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/model.py new file mode 100644 index 000000000..1637ec57b --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG2VerticesDouble/model.py @@ -0,0 +1,65 @@ +import torch +from torch.nn import Linear +from torch_geometric.nn import TAGConv, SAGEConv, GraphConv, RGCNConv + + +class StateModelEncoder(torch.nn.Module): + def __init__(self, hidden_channels, out_channels): + super().__init__() + self.conv1 = RGCNConv(5, hidden_channels, 3) + self.conv12 = TAGConv(hidden_channels, hidden_channels, 2) + self.conv2 = TAGConv(hidden_channels, hidden_channels, 3) + self.conv3 = GraphConv((-1, -1), hidden_channels) + self.conv32 = SAGEConv((-1, -1), hidden_channels) + self.conv4 = SAGEConv((-1, -1), hidden_channels) + self.conv42 = SAGEConv((-1, -1), hidden_channels) + self.conv5 = SAGEConv(-1, hidden_channels) + self.lin = Linear(hidden_channels, out_channels) + + def forward( + self, + game_x, + state_x, + edge_index_v_v, + edge_type_v_v, + edge_index_history_v_s, + edge_attr_history_v_s, + edge_index_in_v_s, + edge_index_s_s, + ): + game_x = self.conv1(game_x, edge_index_v_v, edge_type_v_v).relu() + + game_x = self.conv12(game_x, 
edge_index_v_v).relu() + + state_x = self.conv3( + (game_x, state_x), + edge_index_history_v_s, + edge_attr_history_v_s, + ).relu() + + state_x = self.conv32( + (game_x, state_x), + edge_index_history_v_s, + ).relu() + + state_x = self.conv4( + (game_x, state_x), + edge_index_in_v_s, + ).relu() + + state_x = self.conv42( + (game_x, state_x), + edge_index_in_v_s, + ).relu() + + state_x = self.conv2( + state_x, + edge_index_s_s, + ).relu() + + state_x = self.conv5( + state_x, + edge_index_s_s, + ).relu() + + return self.lin(state_x) diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/GNN_state_pred_het_dict new file mode 100644 index 000000000..106094615 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/GNN_state_pred_het_full new file mode 100644 index 000000000..8d962d82a Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/train_res new file mode 100644 index 000000000..14af802df --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/100e/train_res @@ -0,0 +1,100 @@ +Epoch: 000, Train Loss: 0.048970, Test Loss: 0.048667 +Epoch: 001, Train Loss: 0.030800, Test Loss: 0.030511 +Epoch: 002, Train Loss: 0.025334, Test Loss: 0.025270 +Epoch: 003, Train Loss: 0.023651, Test Loss: 0.023538 +Epoch: 004, Train Loss: 0.019162, Test Loss: 0.019080 +Epoch: 005, Train Loss: 0.019853, Test Loss: 0.019819 +Epoch: 006, Train Loss: 0.014548, Test Loss: 0.014642 +Epoch: 007, Train Loss: 0.015340, Test Loss: 0.015407 +Epoch: 008, Train Loss: 0.013076, Test Loss: 0.013189 +Epoch: 009, Train Loss: 0.010759, Test Loss: 0.010827 +Epoch: 010, Train Loss: 0.009676, Test Loss: 0.009796 +Epoch: 011, Train Loss: 0.009729, Test Loss: 0.009877 +Epoch: 012, Train Loss: 0.009505, Test Loss: 0.009588 +Epoch: 013, Train Loss: 0.007938, Test Loss: 0.008046 +Epoch: 014, Train Loss: 0.007638, Test Loss: 0.007730 +Epoch: 015, Train Loss: 0.007149, Test Loss: 0.007246 +Epoch: 016, Train Loss: 0.007712, Test Loss: 0.007853 +Epoch: 017, Train Loss: 0.007507, Test Loss: 0.007556 +Epoch: 018, Train Loss: 0.006890, Test Loss: 0.006997 +Epoch: 019, Train Loss: 0.006902, Test Loss: 0.007011 +Epoch: 020, Train Loss: 0.006120, Test Loss: 0.006238 +Epoch: 021, Train Loss: 0.006021, Test Loss: 0.006144 +Epoch: 022, Train Loss: 0.005587, Test Loss: 0.005711 +Epoch: 023, Train Loss: 0.005837, Test Loss: 0.005951 +Epoch: 024, Train Loss: 0.005759, Test Loss: 0.005873 +Epoch: 025, Train Loss: 0.005572, Test Loss: 0.005677 +Epoch: 026, Train Loss: 0.005298, Test Loss: 0.005421 +Epoch: 027, Train Loss: 0.005178, Test Loss: 0.005298 +Epoch: 028, Train Loss: 0.005297, Test Loss: 0.005416 +Epoch: 029, Train Loss: 0.005530, Test Loss: 0.005629 +Epoch: 030, Train Loss: 0.005050, Test Loss: 0.005169 +Epoch: 031, Train Loss: 0.005010, Test Loss: 0.005124 +Epoch: 032, Train Loss: 0.005088, Test Loss: 0.005215 +Epoch: 033, Train Loss: 0.004981, Test Loss: 0.005100 +Epoch: 034, Train Loss: 0.004784, Test Loss: 0.004912 +Epoch: 035, Train Loss: 0.004354, 
Test Loss: 0.004489 +Epoch: 036, Train Loss: 0.004310, Test Loss: 0.004452 +Epoch: 037, Train Loss: 0.004201, Test Loss: 0.004338 +Epoch: 038, Train Loss: 0.004280, Test Loss: 0.004434 +Epoch: 039, Train Loss: 0.004249, Test Loss: 0.004401 +Epoch: 040, Train Loss: 0.004414, Test Loss: 0.004560 +Epoch: 041, Train Loss: 0.004089, Test Loss: 0.004244 +Epoch: 042, Train Loss: 0.004175, Test Loss: 0.004347 +Epoch: 043, Train Loss: 0.004278, Test Loss: 0.004436 +Epoch: 044, Train Loss: 0.004163, Test Loss: 0.004314 +Epoch: 045, Train Loss: 0.004047, Test Loss: 0.004236 +Epoch: 046, Train Loss: 0.004052, Test Loss: 0.004219 +Epoch: 047, Train Loss: 0.003960, Test Loss: 0.004133 +Epoch: 048, Train Loss: 0.003730, Test Loss: 0.003902 +Epoch: 049, Train Loss: 0.003869, Test Loss: 0.004039 +Epoch: 050, Train Loss: 0.003696, Test Loss: 0.003866 +Epoch: 051, Train Loss: 0.003892, Test Loss: 0.004041 +Epoch: 052, Train Loss: 0.004020, Test Loss: 0.004185 +Epoch: 053, Train Loss: 0.003810, Test Loss: 0.003983 +Epoch: 054, Train Loss: 0.003761, Test Loss: 0.003915 +Epoch: 055, Train Loss: 0.003905, Test Loss: 0.004070 +Epoch: 056, Train Loss: 0.003691, Test Loss: 0.003864 +Epoch: 057, Train Loss: 0.003878, Test Loss: 0.004045 +Epoch: 058, Train Loss: 0.003448, Test Loss: 0.003622 +Epoch: 059, Train Loss: 0.003818, Test Loss: 0.003982 +Epoch: 060, Train Loss: 0.003715, Test Loss: 0.003885 +Epoch: 061, Train Loss: 0.003829, Test Loss: 0.003994 +Epoch: 062, Train Loss: 0.004003, Test Loss: 0.004183 +Epoch: 063, Train Loss: 0.003527, Test Loss: 0.003699 +Epoch: 064, Train Loss: 0.003164, Test Loss: 0.003315 +Epoch: 065, Train Loss: 0.003364, Test Loss: 0.003543 +Epoch: 066, Train Loss: 0.003863, Test Loss: 0.004034 +Epoch: 067, Train Loss: 0.003042, Test Loss: 0.003210 +Epoch: 068, Train Loss: 0.003170, Test Loss: 0.003324 +Epoch: 069, Train Loss: 0.003216, Test Loss: 0.003375 +Epoch: 070, Train Loss: 0.003173, Test Loss: 0.003336 +Epoch: 071, Train Loss: 0.002911, Test Loss: 0.003069 +Epoch: 072, Train Loss: 0.003125, Test Loss: 0.003285 +Epoch: 073, Train Loss: 0.003070, Test Loss: 0.003210 +Epoch: 074, Train Loss: 0.002740, Test Loss: 0.002884 +Epoch: 075, Train Loss: 0.003088, Test Loss: 0.003235 +Epoch: 076, Train Loss: 0.003172, Test Loss: 0.003334 +Epoch: 077, Train Loss: 0.002919, Test Loss: 0.003071 +Epoch: 078, Train Loss: 0.002999, Test Loss: 0.003141 +Epoch: 079, Train Loss: 0.003031, Test Loss: 0.003189 +Epoch: 080, Train Loss: 0.002906, Test Loss: 0.003084 +Epoch: 081, Train Loss: 0.002974, Test Loss: 0.003123 +Epoch: 082, Train Loss: 0.002798, Test Loss: 0.002937 +Epoch: 083, Train Loss: 0.003092, Test Loss: 0.003233 +Epoch: 084, Train Loss: 0.003443, Test Loss: 0.003602 +Epoch: 085, Train Loss: 0.002840, Test Loss: 0.002990 +Epoch: 086, Train Loss: 0.002805, Test Loss: 0.002959 +Epoch: 087, Train Loss: 0.002878, Test Loss: 0.003014 +Epoch: 088, Train Loss: 0.002929, Test Loss: 0.003086 +Epoch: 089, Train Loss: 0.002926, Test Loss: 0.003084 +Epoch: 090, Train Loss: 0.002785, Test Loss: 0.002938 +Epoch: 091, Train Loss: 0.002726, Test Loss: 0.002869 +Epoch: 092, Train Loss: 0.003428, Test Loss: 0.003573 +Epoch: 093, Train Loss: 0.003024, Test Loss: 0.003174 +Epoch: 094, Train Loss: 0.002989, Test Loss: 0.003127 +Epoch: 095, Train Loss: 0.002801, Test Loss: 0.002942 +Epoch: 096, Train Loss: 0.003118, Test Loss: 0.003258 +Epoch: 097, Train Loss: 0.003105, Test Loss: 0.003258 +Epoch: 098, Train Loss: 0.002740, Test Loss: 0.002885 +Epoch: 099, Train Loss: 0.002515, Test Loss: 0.002646 diff --git 
a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/GNN_state_pred_het_dict new file mode 100644 index 000000000..c2843a79b Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/GNN_state_pred_het_full new file mode 100644 index 000000000..6adbe6bb5 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/train_res new file mode 100644 index 000000000..c4d889494 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/20e/train_res @@ -0,0 +1,20 @@ +Epoch: 000, Train Loss: 0.050863, Test Loss: 0.052546 +Epoch: 001, Train Loss: 0.029373, Test Loss: 0.030833 +Epoch: 002, Train Loss: 0.020787, Test Loss: 0.021997 +Epoch: 003, Train Loss: 0.016364, Test Loss: 0.017456 +Epoch: 004, Train Loss: 0.014149, Test Loss: 0.015224 +Epoch: 005, Train Loss: 0.012573, Test Loss: 0.013564 +Epoch: 006, Train Loss: 0.010724, Test Loss: 0.011628 +Epoch: 007, Train Loss: 0.009498, Test Loss: 0.010333 +Epoch: 008, Train Loss: 0.008883, Test Loss: 0.009697 +Epoch: 009, Train Loss: 0.008178, Test Loss: 0.008891 +Epoch: 010, Train Loss: 0.007793, Test Loss: 0.008484 +Epoch: 011, Train Loss: 0.007520, Test Loss: 0.008133 +Epoch: 012, Train Loss: 0.007100, Test Loss: 0.007761 +Epoch: 013, Train Loss: 0.006461, Test Loss: 0.007043 +Epoch: 014, Train Loss: 0.006458, Test Loss: 0.007039 +Epoch: 015, Train Loss: 0.006238, Test Loss: 0.006798 +Epoch: 016, Train Loss: 0.006034, Test Loss: 0.006582 +Epoch: 017, Train Loss: 0.005676, Test Loss: 0.006194 +Epoch: 018, Train Loss: 0.005672, Test Loss: 0.006189 +Epoch: 019, Train Loss: 0.005481, Test Loss: 0.005976 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/GNN_state_pred_het_dict new file mode 100644 index 000000000..15067a261 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/GNN_state_pred_het_full new file mode 100644 index 000000000..142373748 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/train_res new file mode 100644 index 000000000..e58564d23 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/64ch/50e/train_res @@ -0,0 +1,50 @@ +Epoch: 000, Train Loss: 0.057171, Test Loss: 0.058010 +Epoch: 001, Train Loss: 0.030787, Test Loss: 0.031484 +Epoch: 002, Train Loss: 0.023136, Test Loss: 0.023673 +Epoch: 003, Train Loss: 0.019109, Test Loss: 0.019526 +Epoch: 004, Train Loss: 0.017071, Test Loss: 0.017409 +Epoch: 005, Train Loss: 0.014807, Test Loss: 0.015129 +Epoch: 006, Train Loss: 0.014352, Test 
Loss: 0.014680 +Epoch: 007, Train Loss: 0.012505, Test Loss: 0.012756 +Epoch: 008, Train Loss: 0.011339, Test Loss: 0.011526 +Epoch: 009, Train Loss: 0.011503, Test Loss: 0.011724 +Epoch: 010, Train Loss: 0.009757, Test Loss: 0.009975 +Epoch: 011, Train Loss: 0.009136, Test Loss: 0.009331 +Epoch: 012, Train Loss: 0.009876, Test Loss: 0.010134 +Epoch: 013, Train Loss: 0.009290, Test Loss: 0.009502 +Epoch: 014, Train Loss: 0.007732, Test Loss: 0.007969 +Epoch: 015, Train Loss: 0.008310, Test Loss: 0.008550 +Epoch: 016, Train Loss: 0.007947, Test Loss: 0.008159 +Epoch: 017, Train Loss: 0.007175, Test Loss: 0.007429 +Epoch: 018, Train Loss: 0.006615, Test Loss: 0.006852 +Epoch: 019, Train Loss: 0.006258, Test Loss: 0.006468 +Epoch: 020, Train Loss: 0.006021, Test Loss: 0.006213 +Epoch: 021, Train Loss: 0.005957, Test Loss: 0.006177 +Epoch: 022, Train Loss: 0.005714, Test Loss: 0.005900 +Epoch: 023, Train Loss: 0.005592, Test Loss: 0.005798 +Epoch: 024, Train Loss: 0.005693, Test Loss: 0.005882 +Epoch: 025, Train Loss: 0.005862, Test Loss: 0.006065 +Epoch: 026, Train Loss: 0.005933, Test Loss: 0.006124 +Epoch: 027, Train Loss: 0.004832, Test Loss: 0.004976 +Epoch: 028, Train Loss: 0.005156, Test Loss: 0.005320 +Epoch: 029, Train Loss: 0.004828, Test Loss: 0.004969 +Epoch: 030, Train Loss: 0.004528, Test Loss: 0.004665 +Epoch: 031, Train Loss: 0.004441, Test Loss: 0.004558 +Epoch: 032, Train Loss: 0.004391, Test Loss: 0.004523 +Epoch: 033, Train Loss: 0.004387, Test Loss: 0.004529 +Epoch: 034, Train Loss: 0.004330, Test Loss: 0.004454 +Epoch: 035, Train Loss: 0.004358, Test Loss: 0.004487 +Epoch: 036, Train Loss: 0.004368, Test Loss: 0.004527 +Epoch: 037, Train Loss: 0.004413, Test Loss: 0.004563 +Epoch: 038, Train Loss: 0.004288, Test Loss: 0.004420 +Epoch: 039, Train Loss: 0.004150, Test Loss: 0.004276 +Epoch: 040, Train Loss: 0.004238, Test Loss: 0.004380 +Epoch: 041, Train Loss: 0.004212, Test Loss: 0.004351 +Epoch: 042, Train Loss: 0.004132, Test Loss: 0.004276 +Epoch: 043, Train Loss: 0.004148, Test Loss: 0.004289 +Epoch: 044, Train Loss: 0.003967, Test Loss: 0.004098 +Epoch: 045, Train Loss: 0.003948, Test Loss: 0.004084 +Epoch: 046, Train Loss: 0.003984, Test Loss: 0.004108 +Epoch: 047, Train Loss: 0.003791, Test Loss: 0.003920 +Epoch: 048, Train Loss: 0.004121, Test Loss: 0.004241 +Epoch: 049, Train Loss: 0.003816, Test Loss: 0.003945 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/model.py b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/model.py new file mode 100644 index 000000000..a6060aabd --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3SageDouble/model.py @@ -0,0 +1,62 @@ +import torch +from torch.nn import Linear +from torch_geometric.nn import TAGConv, SAGEConv, GraphConv, RGCNConv + + +class StateModelEncoder(torch.nn.Module): + def __init__(self, hidden_channels, out_channels): + super().__init__() + self.conv1 = RGCNConv(5, hidden_channels, 3) + self.conv2 = TAGConv(hidden_channels, hidden_channels, 3) # TAGConv + self.conv3 = GraphConv((-1, -1), hidden_channels) # SAGEConv + self.conv32 = SAGEConv((-1, -1), hidden_channels) # SAGEConv + self.conv4 = SAGEConv((-1, -1), hidden_channels) + self.conv42 = SAGEConv((-1, -1), hidden_channels) + self.conv5 = SAGEConv(-1, hidden_channels) + self.lin = Linear(hidden_channels, out_channels) + + def forward( + self, + game_x, + state_x, + edge_index_v_v, + edge_type_v_v, + edge_index_history_v_s, + edge_attr_history_v_s, + edge_index_in_v_s, + edge_index_s_s, + ): + game_x = self.conv1(game_x, 
edge_index_v_v, edge_type_v_v).relu() + + state_x = self.conv3( + (game_x, state_x), + edge_index_history_v_s, + edge_attr_history_v_s, + ).relu() + + state_x = self.conv32( + (game_x, state_x), + edge_index_history_v_s, + ).relu() + + state_x = self.conv4( + (game_x, state_x), + edge_index_in_v_s, + ).relu() + + state_x = self.conv42( + (game_x, state_x), + edge_index_in_v_s, + ).relu() + + state_x = self.conv2( + state_x, + edge_index_s_s, + ).relu() + + state_x = self.conv5( + state_x, + edge_index_s_s, + ).relu() + + return self.lin(state_x) diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDouble/model.py b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDouble/model.py new file mode 100644 index 000000000..db9797df0 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDouble/model.py @@ -0,0 +1,65 @@ +import torch +from torch.nn import Linear +from torch_geometric.nn import TAGConv, SAGEConv, GraphConv, RGCNConv + + +class StateModelEncoder(torch.nn.Module): + def __init__(self, hidden_channels, out_channels): + super().__init__() + self.conv1 = RGCNConv(5, hidden_channels, 3) + self.conv12 = TAGConv(hidden_channels, hidden_channels, 3) + self.conv2 = TAGConv(hidden_channels, hidden_channels, 3) + self.conv3 = GraphConv((-1, -1), hidden_channels) + self.conv32 = SAGEConv((-1, -1), hidden_channels) + self.conv4 = SAGEConv((-1, -1), hidden_channels) + self.conv42 = SAGEConv((-1, -1), hidden_channels) + self.conv5 = SAGEConv(-1, hidden_channels) + self.lin = Linear(hidden_channels, out_channels) + + def forward( + self, + game_x, + state_x, + edge_index_v_v, + edge_type_v_v, + edge_index_history_v_s, + edge_attr_history_v_s, + edge_index_in_v_s, + edge_index_s_s, + ): + game_x = self.conv1(game_x, edge_index_v_v, edge_type_v_v).relu() + + game_x = self.conv12(game_x, edge_index_v_v).relu() + + state_x = self.conv3( + (game_x, state_x), + edge_index_history_v_s, + edge_attr_history_v_s, + ).relu() + + state_x = self.conv32( + (game_x, state_x), + edge_index_history_v_s, + ).relu() + + state_x = self.conv4( + (game_x, state_x), + edge_index_in_v_s, + ).relu() + + state_x = self.conv42( + (game_x, state_x), + edge_index_in_v_s, + ).relu() + + state_x = self.conv2( + state_x, + edge_index_s_s, + ).relu() + + state_x = self.conv5( + state_x, + edge_index_s_s, + ).relu() + + return self.lin(state_x) diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/GNN_state_pred_het_dict new file mode 100644 index 000000000..1842159cd Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/GNN_state_pred_het_full new file mode 100644 index 000000000..7ac6a4bf2 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/train_res new file mode 100644 index 000000000..fb911157c --- /dev/null +++ 
b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/64ch/50e/train_res @@ -0,0 +1,50 @@ +Epoch: 000, Train Loss: 0.054918, Test Loss: 0.053425 +Epoch: 001, Train Loss: 0.028589, Test Loss: 0.027859 +Epoch: 002, Train Loss: 0.019864, Test Loss: 0.019639 +Epoch: 003, Train Loss: 0.016030, Test Loss: 0.016005 +Epoch: 004, Train Loss: 0.013104, Test Loss: 0.013210 +Epoch: 005, Train Loss: 0.011782, Test Loss: 0.011952 +Epoch: 006, Train Loss: 0.010417, Test Loss: 0.010594 +Epoch: 007, Train Loss: 0.010224, Test Loss: 0.010432 +Epoch: 008, Train Loss: 0.008963, Test Loss: 0.009149 +Epoch: 009, Train Loss: 0.007912, Test Loss: 0.008077 +Epoch: 010, Train Loss: 0.007760, Test Loss: 0.007913 +Epoch: 011, Train Loss: 0.007213, Test Loss: 0.007336 +Epoch: 012, Train Loss: 0.006648, Test Loss: 0.006765 +Epoch: 013, Train Loss: 0.006204, Test Loss: 0.006312 +Epoch: 014, Train Loss: 0.006395, Test Loss: 0.006482 +Epoch: 015, Train Loss: 0.005600, Test Loss: 0.005681 +Epoch: 016, Train Loss: 0.005745, Test Loss: 0.005843 +Epoch: 017, Train Loss: 0.005498, Test Loss: 0.005565 +Epoch: 018, Train Loss: 0.005247, Test Loss: 0.005295 +Epoch: 019, Train Loss: 0.005341, Test Loss: 0.005394 +Epoch: 020, Train Loss: 0.005133, Test Loss: 0.005173 +Epoch: 021, Train Loss: 0.005180, Test Loss: 0.005224 +Epoch: 022, Train Loss: 0.005078, Test Loss: 0.005122 +Epoch: 023, Train Loss: 0.004958, Test Loss: 0.005002 +Epoch: 024, Train Loss: 0.004883, Test Loss: 0.004910 +Epoch: 025, Train Loss: 0.004655, Test Loss: 0.004690 +Epoch: 026, Train Loss: 0.004500, Test Loss: 0.004535 +Epoch: 027, Train Loss: 0.004508, Test Loss: 0.004511 +Epoch: 028, Train Loss: 0.004532, Test Loss: 0.004557 +Epoch: 029, Train Loss: 0.004223, Test Loss: 0.004213 +Epoch: 030, Train Loss: 0.004052, Test Loss: 0.004052 +Epoch: 031, Train Loss: 0.004143, Test Loss: 0.004105 +Epoch: 032, Train Loss: 0.004163, Test Loss: 0.004147 +Epoch: 033, Train Loss: 0.003878, Test Loss: 0.003880 +Epoch: 034, Train Loss: 0.003943, Test Loss: 0.003921 +Epoch: 035, Train Loss: 0.003724, Test Loss: 0.003701 +Epoch: 036, Train Loss: 0.004015, Test Loss: 0.003975 +Epoch: 037, Train Loss: 0.003921, Test Loss: 0.003890 +Epoch: 038, Train Loss: 0.003789, Test Loss: 0.003744 +Epoch: 039, Train Loss: 0.003739, Test Loss: 0.003705 +Epoch: 040, Train Loss: 0.003761, Test Loss: 0.003714 +Epoch: 041, Train Loss: 0.003625, Test Loss: 0.003575 +Epoch: 042, Train Loss: 0.003481, Test Loss: 0.003434 +Epoch: 043, Train Loss: 0.003394, Test Loss: 0.003348 +Epoch: 044, Train Loss: 0.003348, Test Loss: 0.003302 +Epoch: 045, Train Loss: 0.003272, Test Loss: 0.003233 +Epoch: 046, Train Loss: 0.003369, Test Loss: 0.003325 +Epoch: 047, Train Loss: 0.003224, Test Loss: 0.003172 +Epoch: 048, Train Loss: 0.003286, Test Loss: 0.003241 +Epoch: 049, Train Loss: 0.003164, Test Loss: 0.003122 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/model.py b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/model.py new file mode 100644 index 000000000..4cd06d10e --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateMean/model.py @@ -0,0 +1,70 @@ +import torch +from torch.nn import Linear +from torch_geometric.nn import TAGConv, SAGEConv, GraphConv, RGCNConv, HeteroConv +from torch_geometric.nn.conv.hetero_conv import group + + +class StateModelEncoder(torch.nn.Module): + def __init__(self, hidden_channels, out_channels): + super().__init__() + self.aggr = "mean" + self.conv1 = RGCNConv(5, 
hidden_channels, 3) + self.conv12 = TAGConv(5, hidden_channels, 3) + self.conv2 = TAGConv(hidden_channels, hidden_channels, 3) + self.conv3 = GraphConv((-1, -1), hidden_channels) + self.conv32 = SAGEConv((-1, -1), hidden_channels) + self.conv4 = SAGEConv((-1, -1), hidden_channels) + self.conv42 = SAGEConv((-1, -1), hidden_channels) + self.conv5 = SAGEConv(-1, hidden_channels) + self.lin = Linear(hidden_channels, out_channels) + + def forward( + self, + game_x, + state_x, + edge_index_v_v, + edge_type_v_v, + edge_index_history_v_s, + edge_attr_history_v_s, + edge_index_in_v_s, + edge_index_s_s, + ): + game_x_type = self.conv1(game_x, edge_index_v_v, edge_type_v_v) + + game_x_tag = self.conv12(game_x, edge_index_v_v) + + # Simple aggregation from heteroconv + game_x = group([game_x_type, game_x_tag], self.aggr) + + state_x = self.conv3( + (game_x, state_x), + edge_index_history_v_s, + edge_attr_history_v_s, + ).relu() + + state_x = self.conv32( + (game_x, state_x), + edge_index_history_v_s, + ).relu() + + state_x = self.conv4( + (game_x, state_x), + edge_index_in_v_s, + ).relu() + + state_x = self.conv42( + (game_x, state_x), + edge_index_in_v_s, + ).relu() + + state_x = self.conv2( + state_x, + edge_index_s_s, + ).relu() + + state_x = self.conv5( + state_x, + edge_index_s_s, + ).relu() + + return self.lin(state_x) diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/GNN_state_pred_het_dict new file mode 100644 index 000000000..f679cc82c Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/GNN_state_pred_het_full new file mode 100644 index 000000000..c4e65d812 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/train_res new file mode 100644 index 000000000..143de3719 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/100e/train_res @@ -0,0 +1,100 @@ +Epoch: 000, Train Loss: 0.062594, Test Loss: 0.064086 +Epoch: 001, Train Loss: 0.034779, Test Loss: 0.035657 +Epoch: 002, Train Loss: 0.020366, Test Loss: 0.020907 +Epoch: 003, Train Loss: 0.015592, Test Loss: 0.015988 +Epoch: 004, Train Loss: 0.014100, Test Loss: 0.014482 +Epoch: 005, Train Loss: 0.011469, Test Loss: 0.011776 +Epoch: 006, Train Loss: 0.010333, Test Loss: 0.010631 +Epoch: 007, Train Loss: 0.009513, Test Loss: 0.009867 +Epoch: 008, Train Loss: 0.009158, Test Loss: 0.009485 +Epoch: 009, Train Loss: 0.008751, Test Loss: 0.009057 +Epoch: 010, Train Loss: 0.008127, Test Loss: 0.008428 +Epoch: 011, Train Loss: 0.007755, Test Loss: 0.008048 +Epoch: 012, Train Loss: 0.007586, Test Loss: 0.007845 +Epoch: 013, Train Loss: 0.007238, Test Loss: 0.007510 +Epoch: 014, Train Loss: 0.007009, Test Loss: 0.007243 +Epoch: 015, Train Loss: 0.006942, Test Loss: 0.007165 +Epoch: 016, Train Loss: 0.006652, Test Loss: 0.006844 +Epoch: 017, Train Loss: 0.006418, Test Loss: 0.006619 +Epoch: 018, Train Loss: 
0.006240, Test Loss: 0.006432 +Epoch: 019, Train Loss: 0.006069, Test Loss: 0.006289 +Epoch: 020, Train Loss: 0.006013, Test Loss: 0.006203 +Epoch: 021, Train Loss: 0.006111, Test Loss: 0.006301 +Epoch: 022, Train Loss: 0.005387, Test Loss: 0.005589 +Epoch: 023, Train Loss: 0.005583, Test Loss: 0.005779 +Epoch: 024, Train Loss: 0.005178, Test Loss: 0.005374 +Epoch: 025, Train Loss: 0.005174, Test Loss: 0.005409 +Epoch: 026, Train Loss: 0.005209, Test Loss: 0.005420 +Epoch: 027, Train Loss: 0.004828, Test Loss: 0.005040 +Epoch: 028, Train Loss: 0.004672, Test Loss: 0.004894 +Epoch: 029, Train Loss: 0.004702, Test Loss: 0.004923 +Epoch: 030, Train Loss: 0.004671, Test Loss: 0.004879 +Epoch: 031, Train Loss: 0.004485, Test Loss: 0.004700 +Epoch: 032, Train Loss: 0.004606, Test Loss: 0.004823 +Epoch: 033, Train Loss: 0.004387, Test Loss: 0.004601 +Epoch: 034, Train Loss: 0.004377, Test Loss: 0.004598 +Epoch: 035, Train Loss: 0.004362, Test Loss: 0.004580 +Epoch: 036, Train Loss: 0.004442, Test Loss: 0.004658 +Epoch: 037, Train Loss: 0.004280, Test Loss: 0.004497 +Epoch: 038, Train Loss: 0.004254, Test Loss: 0.004467 +Epoch: 039, Train Loss: 0.004030, Test Loss: 0.004251 +Epoch: 040, Train Loss: 0.004186, Test Loss: 0.004405 +Epoch: 041, Train Loss: 0.004238, Test Loss: 0.004457 +Epoch: 042, Train Loss: 0.003962, Test Loss: 0.004177 +Epoch: 043, Train Loss: 0.003952, Test Loss: 0.004166 +Epoch: 044, Train Loss: 0.003959, Test Loss: 0.004165 +Epoch: 045, Train Loss: 0.003943, Test Loss: 0.004178 +Epoch: 046, Train Loss: 0.003872, Test Loss: 0.004091 +Epoch: 047, Train Loss: 0.003891, Test Loss: 0.004108 +Epoch: 048, Train Loss: 0.003865, Test Loss: 0.004101 +Epoch: 049, Train Loss: 0.003785, Test Loss: 0.003993 +Epoch: 050, Train Loss: 0.003830, Test Loss: 0.004029 +Epoch: 051, Train Loss: 0.003776, Test Loss: 0.003987 +Epoch: 052, Train Loss: 0.003687, Test Loss: 0.003892 +Epoch: 053, Train Loss: 0.003841, Test Loss: 0.004028 +Epoch: 054, Train Loss: 0.003644, Test Loss: 0.003850 +Epoch: 055, Train Loss: 0.003685, Test Loss: 0.003871 +Epoch: 056, Train Loss: 0.003772, Test Loss: 0.003957 +Epoch: 057, Train Loss: 0.003760, Test Loss: 0.003928 +Epoch: 058, Train Loss: 0.003439, Test Loss: 0.003623 +Epoch: 059, Train Loss: 0.003517, Test Loss: 0.003701 +Epoch: 060, Train Loss: 0.003603, Test Loss: 0.003773 +Epoch: 061, Train Loss: 0.003486, Test Loss: 0.003659 +Epoch: 062, Train Loss: 0.003520, Test Loss: 0.003684 +Epoch: 063, Train Loss: 0.003512, Test Loss: 0.003685 +Epoch: 064, Train Loss: 0.003725, Test Loss: 0.003912 +Epoch: 065, Train Loss: 0.003449, Test Loss: 0.003624 +Epoch: 066, Train Loss: 0.003498, Test Loss: 0.003675 +Epoch: 067, Train Loss: 0.003489, Test Loss: 0.003680 +Epoch: 068, Train Loss: 0.003571, Test Loss: 0.003728 +Epoch: 069, Train Loss: 0.003484, Test Loss: 0.003653 +Epoch: 070, Train Loss: 0.003406, Test Loss: 0.003562 +Epoch: 071, Train Loss: 0.003353, Test Loss: 0.003523 +Epoch: 072, Train Loss: 0.003457, Test Loss: 0.003621 +Epoch: 073, Train Loss: 0.003421, Test Loss: 0.003575 +Epoch: 074, Train Loss: 0.003312, Test Loss: 0.003481 +Epoch: 075, Train Loss: 0.003187, Test Loss: 0.003338 +Epoch: 076, Train Loss: 0.003238, Test Loss: 0.003404 +Epoch: 077, Train Loss: 0.003344, Test Loss: 0.003515 +Epoch: 078, Train Loss: 0.003421, Test Loss: 0.003571 +Epoch: 079, Train Loss: 0.003280, Test Loss: 0.003442 +Epoch: 080, Train Loss: 0.003334, Test Loss: 0.003499 +Epoch: 081, Train Loss: 0.003332, Test Loss: 0.003498 +Epoch: 082, Train Loss: 0.003308, Test Loss: 0.003454 
+Epoch: 083, Train Loss: 0.003273, Test Loss: 0.003430 +Epoch: 084, Train Loss: 0.003224, Test Loss: 0.003402 +Epoch: 085, Train Loss: 0.003360, Test Loss: 0.003501 +Epoch: 086, Train Loss: 0.003427, Test Loss: 0.003580 +Epoch: 087, Train Loss: 0.003089, Test Loss: 0.003246 +Epoch: 088, Train Loss: 0.003277, Test Loss: 0.003435 +Epoch: 089, Train Loss: 0.003244, Test Loss: 0.003411 +Epoch: 090, Train Loss: 0.002989, Test Loss: 0.003135 +Epoch: 091, Train Loss: 0.002874, Test Loss: 0.003037 +Epoch: 092, Train Loss: 0.003221, Test Loss: 0.003382 +Epoch: 093, Train Loss: 0.003048, Test Loss: 0.003194 +Epoch: 094, Train Loss: 0.003111, Test Loss: 0.003270 +Epoch: 095, Train Loss: 0.003662, Test Loss: 0.003805 +Epoch: 096, Train Loss: 0.003234, Test Loss: 0.003403 +Epoch: 097, Train Loss: 0.002997, Test Loss: 0.003191 +Epoch: 098, Train Loss: 0.002880, Test Loss: 0.003025 +Epoch: 099, Train Loss: 0.003089, Test Loss: 0.003251 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/GNN_state_pred_het_dict new file mode 100644 index 000000000..b79799a17 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/GNN_state_pred_het_full new file mode 100644 index 000000000..90a9b595a Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/train_res new file mode 100644 index 000000000..1f4e04e84 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/2e/train_res @@ -0,0 +1,2 @@ +Epoch: 000, Train Loss: 0.055533, Test Loss: 0.056853 +Epoch: 001, Train Loss: 0.029492, Test Loss: 0.030115 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/GNN_state_pred_het_dict new file mode 100644 index 000000000..40c04d8e7 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/GNN_state_pred_het_dict differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/GNN_state_pred_het_full new file mode 100644 index 000000000..851efcc62 Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/GNN_state_pred_het_full differ diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/train_res b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/train_res new file mode 100644 index 000000000..38705bd91 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/64ch/50e/train_res @@ -0,0 +1,50 @@ +Epoch: 000, Train Loss: 0.048158, Test Loss: 0.046577 +Epoch: 001, Train Loss: 0.032677, Test Loss: 0.031920 +Epoch: 002, Train Loss: 
0.024185, Test Loss: 0.023294 +Epoch: 003, Train Loss: 0.017004, Test Loss: 0.016455 +Epoch: 004, Train Loss: 0.015923, Test Loss: 0.015428 +Epoch: 005, Train Loss: 0.012461, Test Loss: 0.012148 +Epoch: 006, Train Loss: 0.010531, Test Loss: 0.010286 +Epoch: 007, Train Loss: 0.010108, Test Loss: 0.009872 +Epoch: 008, Train Loss: 0.008784, Test Loss: 0.008559 +Epoch: 009, Train Loss: 0.008499, Test Loss: 0.008302 +Epoch: 010, Train Loss: 0.007457, Test Loss: 0.007261 +Epoch: 011, Train Loss: 0.007141, Test Loss: 0.006983 +Epoch: 012, Train Loss: 0.006942, Test Loss: 0.006796 +Epoch: 013, Train Loss: 0.006582, Test Loss: 0.006431 +Epoch: 014, Train Loss: 0.006114, Test Loss: 0.006008 +Epoch: 015, Train Loss: 0.005992, Test Loss: 0.005844 +Epoch: 016, Train Loss: 0.005821, Test Loss: 0.005650 +Epoch: 017, Train Loss: 0.005416, Test Loss: 0.005283 +Epoch: 018, Train Loss: 0.005381, Test Loss: 0.005263 +Epoch: 019, Train Loss: 0.005180, Test Loss: 0.005089 +Epoch: 020, Train Loss: 0.005030, Test Loss: 0.004930 +Epoch: 021, Train Loss: 0.004922, Test Loss: 0.004842 +Epoch: 022, Train Loss: 0.004777, Test Loss: 0.004701 +Epoch: 023, Train Loss: 0.004852, Test Loss: 0.004752 +Epoch: 024, Train Loss: 0.004680, Test Loss: 0.004583 +Epoch: 025, Train Loss: 0.004480, Test Loss: 0.004428 +Epoch: 026, Train Loss: 0.004510, Test Loss: 0.004400 +Epoch: 027, Train Loss: 0.004321, Test Loss: 0.004270 +Epoch: 028, Train Loss: 0.004582, Test Loss: 0.004454 +Epoch: 029, Train Loss: 0.004193, Test Loss: 0.004129 +Epoch: 030, Train Loss: 0.004222, Test Loss: 0.004121 +Epoch: 031, Train Loss: 0.004181, Test Loss: 0.004117 +Epoch: 032, Train Loss: 0.004117, Test Loss: 0.004017 +Epoch: 033, Train Loss: 0.003995, Test Loss: 0.003924 +Epoch: 034, Train Loss: 0.004049, Test Loss: 0.003970 +Epoch: 035, Train Loss: 0.004063, Test Loss: 0.003996 +Epoch: 036, Train Loss: 0.003906, Test Loss: 0.003842 +Epoch: 037, Train Loss: 0.003967, Test Loss: 0.003881 +Epoch: 038, Train Loss: 0.003862, Test Loss: 0.003808 +Epoch: 039, Train Loss: 0.003863, Test Loss: 0.003804 +Epoch: 040, Train Loss: 0.003856, Test Loss: 0.003806 +Epoch: 041, Train Loss: 0.003822, Test Loss: 0.003762 +Epoch: 042, Train Loss: 0.003671, Test Loss: 0.003616 +Epoch: 043, Train Loss: 0.003683, Test Loss: 0.003636 +Epoch: 044, Train Loss: 0.003623, Test Loss: 0.003589 +Epoch: 045, Train Loss: 0.003573, Test Loss: 0.003537 +Epoch: 046, Train Loss: 0.003534, Test Loss: 0.003500 +Epoch: 047, Train Loss: 0.003558, Test Loss: 0.003514 +Epoch: 048, Train Loss: 0.003545, Test Loss: 0.003496 +Epoch: 049, Train Loss: 0.003433, Test Loss: 0.003388 diff --git a/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/model.py b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/model.py new file mode 100644 index 000000000..568f15756 --- /dev/null +++ b/VSharp.ML.AIAgent/ml/models/RGCNEdgeTypeTAG3VerticesDoubleAggregateSum/model.py @@ -0,0 +1,70 @@ +import torch +from torch.nn import Linear +from torch_geometric.nn import TAGConv, SAGEConv, GraphConv, RGCNConv, HeteroConv +from torch_geometric.nn.conv.hetero_conv import group + + +class StateModelEncoder(torch.nn.Module): + def __init__(self, hidden_channels, out_channels): + super().__init__() + self.aggr = "sum" + self.conv1 = RGCNConv(5, hidden_channels, 3) + self.conv12 = TAGConv(5, hidden_channels, 3) + self.conv2 = TAGConv(hidden_channels, hidden_channels, 3) + self.conv3 = GraphConv((-1, -1), hidden_channels) + self.conv32 = SAGEConv((-1, -1), hidden_channels) + self.conv4 = 
SAGEConv((-1, -1), hidden_channels)
+        self.conv42 = SAGEConv((-1, -1), hidden_channels)
+        self.conv5 = SAGEConv(-1, hidden_channels)
+        self.lin = Linear(hidden_channels, out_channels)
+
+    def forward(
+        self,
+        game_x,
+        state_x,
+        edge_index_v_v,
+        edge_type_v_v,
+        edge_index_history_v_s,
+        edge_attr_history_v_s,
+        edge_index_in_v_s,
+        edge_index_s_s,
+    ):
+        game_x_type = self.conv1(game_x, edge_index_v_v, edge_type_v_v)
+
+        game_x_tag = self.conv12(game_x, edge_index_v_v)
+
+        # Simple aggregation from heteroconv
+        game_x = group([game_x_type, game_x_tag], self.aggr)
+
+        state_x = self.conv3(
+            (game_x, state_x),
+            edge_index_history_v_s,
+            edge_attr_history_v_s,
+        ).relu()
+
+        state_x = self.conv32(
+            (game_x, state_x),
+            edge_index_history_v_s,
+        ).relu()
+
+        state_x = self.conv4(
+            (game_x, state_x),
+            edge_index_in_v_s,
+        ).relu()
+
+        state_x = self.conv42(
+            (game_x, state_x),
+            edge_index_in_v_s,
+        ).relu()
+
+        state_x = self.conv2(
+            state_x,
+            edge_index_s_s,
+        ).relu()
+
+        state_x = self.conv5(
+            state_x,
+            edge_index_s_s,
+        ).relu()
+
+        return self.lin(state_x)
diff --git a/VSharp.ML.AIAgent/ml/models/TAG3EdgeTypeVerticesDouble/model.py b/VSharp.ML.AIAgent/ml/models/TAG3EdgeTypeVerticesDouble/model.py
new file mode 100644
index 000000000..f2d9b15f8
--- /dev/null
+++ b/VSharp.ML.AIAgent/ml/models/TAG3EdgeTypeVerticesDouble/model.py
@@ -0,0 +1,65 @@
+import torch
+from torch.nn import Linear
+from torch_geometric.nn import TAGConv, SAGEConv, GraphConv, RGCNConv
+
+
+class StateModelEncoder(torch.nn.Module):
+    def __init__(self, hidden_channels, out_channels):
+        super().__init__()
+        self.conv1 = RGCNConv(-1, hidden_channels, 3)
+        self.conv12 = TAGConv(5, hidden_channels, 3)
+        self.conv2 = TAGConv(hidden_channels, hidden_channels, 3)
+        self.conv3 = GraphConv((-1, -1), hidden_channels)
+        self.conv32 = SAGEConv((-1, -1), hidden_channels)
+        self.conv4 = SAGEConv((-1, -1), hidden_channels)
+        self.conv42 = SAGEConv((-1, -1), hidden_channels)
+        self.conv5 = SAGEConv(-1, hidden_channels)
+        self.lin = Linear(hidden_channels, out_channels)
+
+    def forward(
+        self,
+        game_x,
+        state_x,
+        edge_index_v_v,
+        edge_type_v_v,
+        edge_index_history_v_s,
+        edge_attr_history_v_s,
+        edge_index_in_v_s,
+        edge_index_s_s,
+    ):
+        game_x = self.conv12(game_x, edge_index_v_v).relu()
+
+        game_x = self.conv1(game_x, edge_index_v_v, edge_type_v_v).relu()
+
+        state_x = self.conv3(
+            (game_x, state_x),
+            edge_index_history_v_s,
+            edge_attr_history_v_s,
+        ).relu()
+
+        state_x = self.conv32(
+            (game_x, state_x),
+            edge_index_history_v_s,
+        ).relu()
+
+        state_x = self.conv4(
+            (game_x, state_x),
+            edge_index_in_v_s,
+        ).relu()
+
+        state_x = self.conv42(
+            (game_x, state_x),
+            edge_index_in_v_s,
+        ).relu()
+
+        state_x = self.conv2(
+            state_x,
+            edge_index_s_s,
+        ).relu()
+
+        state_x = self.conv5(
+            state_x,
+            edge_index_s_s,
+        ).relu()
+
+        return self.lin(state_x)
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/GNN_state_pred_het_dict
new file mode 100644
index 000000000..e972c068e
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/GNN_state_pred_het_dict differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/GNN_state_pred_het_full
new file mode 100644
index 000000000..c577f77bb
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/GNN_state_pred_het_full differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/train_res b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/train_res
new file mode 100644
index 000000000..0c7fa36e4
--- /dev/null
+++ b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/32ch/20e/train_res
@@ -0,0 +1,20 @@
+Epoch: 000, Train Loss: 0.072378, Test Loss: 0.073496
+Epoch: 001, Train Loss: 0.043047, Test Loss: 0.043577
+Epoch: 002, Train Loss: 0.034224, Test Loss: 0.034591
+Epoch: 003, Train Loss: 0.028296, Test Loss: 0.028690
+Epoch: 004, Train Loss: 0.024289, Test Loss: 0.024659
+Epoch: 005, Train Loss: 0.021720, Test Loss: 0.022099
+Epoch: 006, Train Loss: 0.020279, Test Loss: 0.020652
+Epoch: 007, Train Loss: 0.018290, Test Loss: 0.018627
+Epoch: 008, Train Loss: 0.016991, Test Loss: 0.017269
+Epoch: 009, Train Loss: 0.016168, Test Loss: 0.016476
+Epoch: 010, Train Loss: 0.014912, Test Loss: 0.015189
+Epoch: 011, Train Loss: 0.014065, Test Loss: 0.014356
+Epoch: 012, Train Loss: 0.014232, Test Loss: 0.014502
+Epoch: 013, Train Loss: 0.013865, Test Loss: 0.014091
+Epoch: 014, Train Loss: 0.013500, Test Loss: 0.013771
+Epoch: 015, Train Loss: 0.012756, Test Loss: 0.012998
+Epoch: 016, Train Loss: 0.012388, Test Loss: 0.012617
+Epoch: 017, Train Loss: 0.012210, Test Loss: 0.012405
+Epoch: 018, Train Loss: 0.012160, Test Loss: 0.012321
+Epoch: 019, Train Loss: 0.011690, Test Loss: 0.011885
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/GNN_state_pred_het_dict
new file mode 100644
index 000000000..0517348c5
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/GNN_state_pred_het_dict differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/GNN_state_pred_het_full
new file mode 100644
index 000000000..a949058c6
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/GNN_state_pred_het_full differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/train_res b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/train_res
new file mode 100644
index 000000000..f15f73606
--- /dev/null
+++ b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/100e/train_res
@@ -0,0 +1,100 @@
+Epoch: 000, Train Loss: 0.047441, Test Loss: 0.047307
+Epoch: 001, Train Loss: 0.027983, Test Loss: 0.028091
+Epoch: 002, Train Loss: 0.019875, Test Loss: 0.019800
+Epoch: 003, Train Loss: 0.015671, Test Loss: 0.015709
+Epoch: 004, Train Loss: 0.013007, Test Loss: 0.013053
+Epoch: 005, Train Loss: 0.010809, Test Loss: 0.010988
+Epoch: 006, Train Loss: 0.009938, Test Loss: 0.010142
+Epoch: 007, Train Loss: 0.009297, Test Loss: 0.009558
+Epoch: 008, Train Loss: 0.008306, Test Loss: 0.008476
+Epoch: 009, Train Loss: 0.007844, Test Loss: 0.008031
+Epoch: 010, Train Loss: 0.007373, Test Loss: 0.007566
+Epoch: 011, Train Loss: 0.007039, Test Loss: 0.007258
+Epoch: 012, Train Loss: 0.006610, Test Loss: 0.006775
+Epoch: 013, Train Loss: 0.006401, Test Loss: 0.006583
+Epoch: 014, Train Loss: 0.006156, Test Loss: 0.006349
+Epoch: 015, Train Loss: 0.006003, Test Loss: 0.006174
+Epoch: 016, Train Loss: 0.005762, Test Loss: 0.005955
+Epoch: 017, Train Loss: 0.005660, Test Loss: 0.005828
+Epoch: 018, Train Loss: 0.005280, Test Loss: 0.005431
+Epoch: 019, Train Loss: 0.005207, Test Loss: 0.005368
+Epoch: 020, Train Loss: 0.005102, Test Loss: 0.005245
+Epoch: 021, Train Loss: 0.004956, Test Loss: 0.005110
+Epoch: 022, Train Loss: 0.004837, Test Loss: 0.004973
+Epoch: 023, Train Loss: 0.004780, Test Loss: 0.004912
+Epoch: 024, Train Loss: 0.004628, Test Loss: 0.004775
+Epoch: 025, Train Loss: 0.004730, Test Loss: 0.004861
+Epoch: 026, Train Loss: 0.004443, Test Loss: 0.004571
+Epoch: 027, Train Loss: 0.004354, Test Loss: 0.004493
+Epoch: 028, Train Loss: 0.004315, Test Loss: 0.004458
+Epoch: 029, Train Loss: 0.004350, Test Loss: 0.004485
+Epoch: 030, Train Loss: 0.004211, Test Loss: 0.004352
+Epoch: 031, Train Loss: 0.004113, Test Loss: 0.004253
+Epoch: 032, Train Loss: 0.004068, Test Loss: 0.004211
+Epoch: 033, Train Loss: 0.004030, Test Loss: 0.004165
+Epoch: 034, Train Loss: 0.003899, Test Loss: 0.004036
+Epoch: 035, Train Loss: 0.003832, Test Loss: 0.003964
+Epoch: 036, Train Loss: 0.003761, Test Loss: 0.003910
+Epoch: 037, Train Loss: 0.003703, Test Loss: 0.003839
+Epoch: 038, Train Loss: 0.003704, Test Loss: 0.003837
+Epoch: 039, Train Loss: 0.003600, Test Loss: 0.003739
+Epoch: 040, Train Loss: 0.003544, Test Loss: 0.003683
+Epoch: 041, Train Loss: 0.003528, Test Loss: 0.003679
+Epoch: 042, Train Loss: 0.003515, Test Loss: 0.003656
+Epoch: 043, Train Loss: 0.003423, Test Loss: 0.003566
+Epoch: 044, Train Loss: 0.003475, Test Loss: 0.003613
+Epoch: 045, Train Loss: 0.003346, Test Loss: 0.003486
+Epoch: 046, Train Loss: 0.003336, Test Loss: 0.003475
+Epoch: 047, Train Loss: 0.003270, Test Loss: 0.003401
+Epoch: 048, Train Loss: 0.003265, Test Loss: 0.003391
+Epoch: 049, Train Loss: 0.003190, Test Loss: 0.003301
+Epoch: 050, Train Loss: 0.003140, Test Loss: 0.003256
+Epoch: 051, Train Loss: 0.003111, Test Loss: 0.003236
+Epoch: 052, Train Loss: 0.003114, Test Loss: 0.003231
+Epoch: 053, Train Loss: 0.003045, Test Loss: 0.003151
+Epoch: 054, Train Loss: 0.002983, Test Loss: 0.003090
+Epoch: 055, Train Loss: 0.002978, Test Loss: 0.003089
+Epoch: 056, Train Loss: 0.002931, Test Loss: 0.003043
+Epoch: 057, Train Loss: 0.002931, Test Loss: 0.003039
+Epoch: 058, Train Loss: 0.002922, Test Loss: 0.003025
+Epoch: 059, Train Loss: 0.002918, Test Loss: 0.003017
+Epoch: 060, Train Loss: 0.002894, Test Loss: 0.002988
+Epoch: 061, Train Loss: 0.002802, Test Loss: 0.002901
+Epoch: 062, Train Loss: 0.002828, Test Loss: 0.002917
+Epoch: 063, Train Loss: 0.002812, Test Loss: 0.002898
+Epoch: 064, Train Loss: 0.002941, Test Loss: 0.003026
+Epoch: 065, Train Loss: 0.002762, Test Loss: 0.002856
+Epoch: 066, Train Loss: 0.002782, Test Loss: 0.002879
+Epoch: 067, Train Loss: 0.002745, Test Loss: 0.002826
+Epoch: 068, Train Loss: 0.002705, Test Loss: 0.002787
+Epoch: 069, Train Loss: 0.002805, Test Loss: 0.002893
+Epoch: 070, Train Loss: 0.002758, Test Loss: 0.002843
+Epoch: 071, Train Loss: 0.002624, Test Loss: 0.002710
+Epoch: 072, Train Loss: 0.002774, Test Loss: 0.002854
+Epoch: 073, Train Loss: 0.002759, Test Loss: 0.002842
+Epoch: 074, Train Loss: 0.002614, Test Loss: 0.002701
+Epoch: 075, Train Loss: 0.002605, Test Loss: 0.002688
+Epoch: 076, Train Loss: 0.002582, Test Loss: 0.002659
+Epoch: 077, Train Loss: 0.002550, Test Loss: 0.002635
+Epoch: 078, Train Loss: 0.002603, Test Loss: 0.002696
+Epoch: 079, Train Loss: 0.002573, Test Loss: 0.002650
+Epoch: 080, Train Loss: 0.002495, Test Loss: 0.002565
+Epoch: 081, Train Loss: 0.002490, Test Loss: 0.002565
+Epoch: 082, Train Loss: 0.002456, Test Loss: 0.002534
+Epoch: 083, Train Loss: 0.002446, Test Loss: 0.002514
+Epoch: 084, Train Loss: 0.002489, Test Loss: 0.002568
+Epoch: 085, Train Loss: 0.002667, Test Loss: 0.002737
+Epoch: 086, Train Loss: 0.002421, Test Loss: 0.002483
+Epoch: 087, Train Loss: 0.002547, Test Loss: 0.002629
+Epoch: 088, Train Loss: 0.002356, Test Loss: 0.002422
+Epoch: 089, Train Loss: 0.002392, Test Loss: 0.002453
+Epoch: 090, Train Loss: 0.002351, Test Loss: 0.002427
+Epoch: 091, Train Loss: 0.002332, Test Loss: 0.002407
+Epoch: 092, Train Loss: 0.004105, Test Loss: 0.004236
+Epoch: 093, Train Loss: 0.002387, Test Loss: 0.002469
+Epoch: 094, Train Loss: 0.002316, Test Loss: 0.002390
+Epoch: 095, Train Loss: 0.002291, Test Loss: 0.002376
+Epoch: 096, Train Loss: 0.002354, Test Loss: 0.002421
+Epoch: 097, Train Loss: 0.002294, Test Loss: 0.002367
+Epoch: 098, Train Loss: 0.002244, Test Loss: 0.002338
+Epoch: 099, Train Loss: 0.002263, Test Loss: 0.002370
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/GNN_state_pred_het_dict
new file mode 100644
index 000000000..0c4201d5a
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/GNN_state_pred_het_dict differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/GNN_state_pred_het_full
new file mode 100644
index 000000000..80819e1d8
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/GNN_state_pred_het_full differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/train_res b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/train_res
new file mode 100644
index 000000000..f134a6c1d
--- /dev/null
+++ b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/20e/train_res
@@ -0,0 +1,20 @@
+Epoch: 000, Train Loss: 0.058653, Test Loss: 0.061931
+Epoch: 001, Train Loss: 0.033599, Test Loss: 0.035252
+Epoch: 002, Train Loss: 0.024017, Test Loss: 0.025401
+Epoch: 003, Train Loss: 0.018566, Test Loss: 0.020084
+Epoch: 004, Train Loss: 0.016096, Test Loss: 0.017555
+Epoch: 005, Train Loss: 0.014193, Test Loss: 0.015381
+Epoch: 006, Train Loss: 0.012142, Test Loss: 0.013107
+Epoch: 007, Train Loss: 0.011092, Test Loss: 0.011963
+Epoch: 008, Train Loss: 0.010418, Test Loss: 0.011354
+Epoch: 009, Train Loss: 0.008393, Test Loss: 0.008903
+Epoch: 010, Train Loss: 0.008293, Test Loss: 0.008749
+Epoch: 011, Train Loss: 0.007354, Test Loss: 0.007718
+Epoch: 012, Train Loss: 0.006936, Test Loss: 0.007338
+Epoch: 013, Train Loss: 0.006520, Test Loss: 0.006797
+Epoch: 014, Train Loss: 0.006349, Test Loss: 0.006665
+Epoch: 015, Train Loss: 0.005991, Test Loss: 0.006300
+Epoch: 016, Train Loss: 0.005723, Test Loss: 0.006024
+Epoch: 017, Train Loss: 0.005562, Test Loss: 0.005868
+Epoch: 018, Train Loss: 0.005451, Test Loss: 0.005739
+Epoch: 019, Train Loss: 0.005401, Test Loss: 0.005666
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/GNN_state_pred_het_dict
new file mode 100644
index 000000000..e7b24ee14
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/GNN_state_pred_het_dict differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/GNN_state_pred_het_full
new file mode 100644
index 000000000..24f76d8fa
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/GNN_state_pred_het_full differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/train_res b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/train_res
new file mode 100644
index 000000000..53b74e0f5
--- /dev/null
+++ b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/64ch/50e/train_res
@@ -0,0 +1,50 @@
+Epoch: 000, Train Loss: 0.055234, Test Loss: 0.054694
+Epoch: 001, Train Loss: 0.025779, Test Loss: 0.025564
+Epoch: 002, Train Loss: 0.018780, Test Loss: 0.018762
+Epoch: 003, Train Loss: 0.014738, Test Loss: 0.014721
+Epoch: 004, Train Loss: 0.014752, Test Loss: 0.014738
+Epoch: 005, Train Loss: 0.011082, Test Loss: 0.011120
+Epoch: 006, Train Loss: 0.010220, Test Loss: 0.010273
+Epoch: 007, Train Loss: 0.009401, Test Loss: 0.009455
+Epoch: 008, Train Loss: 0.009154, Test Loss: 0.009261
+Epoch: 009, Train Loss: 0.008321, Test Loss: 0.008456
+Epoch: 010, Train Loss: 0.007499, Test Loss: 0.007654
+Epoch: 011, Train Loss: 0.007446, Test Loss: 0.007599
+Epoch: 012, Train Loss: 0.006967, Test Loss: 0.007105
+Epoch: 013, Train Loss: 0.006487, Test Loss: 0.006650
+Epoch: 014, Train Loss: 0.005996, Test Loss: 0.006176
+Epoch: 015, Train Loss: 0.005860, Test Loss: 0.006047
+Epoch: 016, Train Loss: 0.005585, Test Loss: 0.005773
+Epoch: 017, Train Loss: 0.005514, Test Loss: 0.005701
+Epoch: 018, Train Loss: 0.005469, Test Loss: 0.005659
+Epoch: 019, Train Loss: 0.005294, Test Loss: 0.005491
+Epoch: 020, Train Loss: 0.005149, Test Loss: 0.005359
+Epoch: 021, Train Loss: 0.005043, Test Loss: 0.005244
+Epoch: 022, Train Loss: 0.004933, Test Loss: 0.005152
+Epoch: 023, Train Loss: 0.004932, Test Loss: 0.005132
+Epoch: 024, Train Loss: 0.004678, Test Loss: 0.004873
+Epoch: 025, Train Loss: 0.004764, Test Loss: 0.004978
+Epoch: 026, Train Loss: 0.004552, Test Loss: 0.004733
+Epoch: 027, Train Loss: 0.004565, Test Loss: 0.004771
+Epoch: 028, Train Loss: 0.004463, Test Loss: 0.004661
+Epoch: 029, Train Loss: 0.004375, Test Loss: 0.004548
+Epoch: 030, Train Loss: 0.004280, Test Loss: 0.004473
+Epoch: 031, Train Loss: 0.004256, Test Loss: 0.004417
+Epoch: 032, Train Loss: 0.004228, Test Loss: 0.004398
+Epoch: 033, Train Loss: 0.004142, Test Loss: 0.004309
+Epoch: 034, Train Loss: 0.004160, Test Loss: 0.004335
+Epoch: 035, Train Loss: 0.004107, Test Loss: 0.004293
+Epoch: 036, Train Loss: 0.003954, Test Loss: 0.004124
+Epoch: 037, Train Loss: 0.003918, Test Loss: 0.004093
+Epoch: 038, Train Loss: 0.003874, Test Loss: 0.004027
+Epoch: 039, Train Loss: 0.003726, Test Loss: 0.003898
+Epoch: 040, Train Loss: 0.003805, Test Loss: 0.003967
+Epoch: 041, Train Loss: 0.003774, Test Loss: 0.003953
+Epoch: 042, Train Loss: 0.003694, Test Loss: 0.003852
+Epoch: 043, Train Loss: 0.003677, Test Loss: 0.003846
+Epoch: 044, Train Loss: 0.003673, Test Loss: 0.003846
+Epoch: 045, Train Loss: 0.003600, Test Loss: 0.003753
+Epoch: 046, Train Loss: 0.003545, Test Loss: 0.003703
+Epoch: 047, Train Loss: 0.003458, Test Loss: 0.003623
+Epoch: 048, Train Loss: 0.003457, Test Loss: 0.003616
+Epoch: 049, Train Loss: 0.003424, Test Loss: 0.003573
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageDouble/model.py b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/model.py
new file mode 100644
index 000000000..ddfb02525
--- /dev/null
+++ b/VSharp.ML.AIAgent/ml/models/TAGSageDouble/model.py
@@ -0,0 +1,65 @@
+import torch
+from torch.nn import Linear
+from torch_geometric.nn import TAGConv, SAGEConv, GraphConv
+
+
+class StateModelEncoder(torch.nn.Module):
+    def __init__(self, hidden_channels, out_channels):
+        super().__init__()
+        self.conv1 = TAGConv(5, hidden_channels, 2)
+        self.conv2 = TAGConv(hidden_channels, hidden_channels, 3)  # TAGConv
+        self.conv3 = GraphConv((-1, -1), hidden_channels)  # SAGEConv
+        self.conv32 = SAGEConv((-1, -1), hidden_channels)  # SAGEConv
+        self.conv4 = SAGEConv((-1, -1), hidden_channels)
+        self.conv42 = SAGEConv((-1, -1), hidden_channels)
+        self.conv5 = SAGEConv(-1, hidden_channels)
+        self.lin = Linear(hidden_channels, out_channels)
+
+    def forward(
+        self,
+        game_x,
+        state_x,
+        edge_index_v_v,
+        edge_type_v_v,
+        edge_index_history_v_s,
+        edge_attr_history_v_s,
+        edge_index_in_v_s,
+        edge_index_s_s,
+    ):
+        game_x = self.conv1(
+            game_x,
+            edge_index_v_v,
+        ).relu()
+
+        state_x = self.conv3(
+            (game_x, state_x),
+            edge_index_history_v_s,
+            edge_attr_history_v_s,
+        ).relu()
+
+        state_x = self.conv32(
+            (game_x, state_x),
+            edge_index_history_v_s,
+        ).relu()
+
+        state_x = self.conv4(
+            (game_x, state_x),
+            edge_index_in_v_s,
+        ).relu()
+
+        state_x = self.conv42(
+            (game_x, state_x),
+            edge_index_in_v_s,
+        ).relu()
+
+        state_x = self.conv2(
+            state_x,
+            edge_index_s_s,
+        ).relu()
+
+        state_x = self.conv5(
+            state_x,
+            edge_index_s_s,
+        ).relu()
+
+        return self.lin(state_x)
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/GNN_state_pred_het_dict
new file mode 100644
index 000000000..0317e35e8
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/GNN_state_pred_het_dict differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/GNN_state_pred_het_full
new file mode 100644
index 000000000..9ab9f6e00
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/GNN_state_pred_het_full differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/train_res b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/train_res
new file mode 100644
index 000000000..7d8b61772
--- /dev/null
+++ b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/32ch/1e/train_res
@@ -0,0 +1 @@
+Epoch: 000, Train Loss: 0.141583, Test Loss: 0.145491
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/GNN_state_pred_het_dict b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/GNN_state_pred_het_dict
new file mode 100644
index 000000000..d185a743f
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/GNN_state_pred_het_dict differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/GNN_state_pred_het_full b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/GNN_state_pred_het_full
new file mode 100644
index 000000000..56418e6a4
Binary files /dev/null and b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/GNN_state_pred_het_full differ
diff --git a/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/train_res b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/train_res
new file mode 100644
index 000000000..70053e17f
--- /dev/null
+++ b/VSharp.ML.AIAgent/ml/models/TAGSageSimple/64ch/100e/train_res
@@ -0,0 +1,100 @@
+Epoch: 000, Train Loss: 0.157131, Test Loss: 0.154543
+Epoch: 001, Train Loss: 0.098552, Test Loss: 0.098144
+Epoch: 002, Train Loss: 0.058471, Test Loss: 0.058878
+Epoch: 003, Train Loss: 0.035232, Test Loss: 0.036190
+Epoch: 004, Train Loss: 0.030188, Test Loss: 0.031279
+Epoch: 005, Train Loss: 0.025495, Test Loss: 0.026449
+Epoch: 006, Train Loss: 0.021157, Test Loss: 0.021989
+Epoch: 007, Train Loss: 0.018034, Test Loss: 0.018797
+Epoch: 008, Train Loss: 0.016052, Test Loss: 0.016735
+Epoch: 009, Train Loss: 0.013862, Test Loss: 0.014500
+Epoch: 010, Train Loss: 0.013893, Test Loss: 0.014568
+Epoch: 011, Train Loss: 0.012088, Test Loss: 0.012668
+Epoch: 012, Train Loss: 0.011636, Test Loss: 0.012203
+Epoch: 013, Train Loss: 0.010516, Test Loss: 0.011049
+Epoch: 014, Train Loss: 0.010369, Test Loss: 0.010918
+Epoch: 015, Train Loss: 0.010409, Test Loss: 0.010979
+Epoch: 016, Train Loss: 0.009740, Test Loss: 0.010261
+Epoch: 017, Train Loss: 0.009364, Test Loss: 0.009907
+Epoch: 018, Train Loss: 0.008932, Test Loss: 0.009460
+Epoch: 019, Train Loss: 0.008673, Test Loss: 0.009178
+Epoch: 020, Train Loss: 0.008576, Test Loss: 0.009127
+Epoch: 021, Train Loss: 0.008298, Test Loss: 0.008797
+Epoch: 022, Train Loss: 0.008163, Test Loss: 0.008679
+Epoch: 023, Train Loss: 0.007808, Test Loss: 0.008312
+Epoch: 024, Train Loss: 0.007841, Test Loss: 0.008336
+Epoch: 025, Train Loss: 0.007679, Test Loss: 0.008163
+Epoch: 026, Train Loss: 0.007780, Test Loss: 0.008302
+Epoch: 027, Train Loss: 0.007314, Test Loss: 0.007819
+Epoch: 028, Train Loss: 0.007237, Test Loss: 0.007702
+Epoch: 029, Train Loss: 0.007087, Test Loss: 0.007578
+Epoch: 030, Train Loss: 0.007126, Test Loss: 0.007616
+Epoch: 031, Train Loss: 0.007137, Test Loss: 0.007653
+Epoch: 032, Train Loss: 0.006848, Test Loss: 0.007340
+Epoch: 033, Train Loss: 0.007055, Test Loss: 0.007553
+Epoch: 034, Train Loss: 0.006828, Test Loss: 0.007334
+Epoch: 035, Train Loss: 0.007106, Test Loss: 0.007606
+Epoch: 036, Train Loss: 0.006768, Test Loss: 0.007280
+Epoch: 037, Train Loss: 0.006812, Test Loss: 0.007287
+Epoch: 038, Train Loss: 0.006672, Test Loss: 0.007155
+Epoch: 039, Train Loss: 0.006681, Test Loss: 0.007179
+Epoch: 040, Train Loss: 0.006532, Test Loss: 0.006999
+Epoch: 041, Train Loss: 0.006411, Test Loss: 0.006895
+Epoch: 042, Train Loss: 0.006475, Test Loss: 0.006952
+Epoch: 043, Train Loss: 0.006204, Test Loss: 0.006666
+Epoch: 044, Train Loss: 0.006532, Test Loss: 0.007020
+Epoch: 045, Train Loss: 0.006301, Test Loss: 0.006781
+Epoch: 046, Train Loss: 0.006522, Test Loss: 0.007000
+Epoch: 047, Train Loss: 0.006169, Test Loss: 0.006638
+Epoch: 048, Train Loss: 0.006223, Test Loss: 0.006711
+Epoch: 049, Train Loss: 0.006154, Test Loss: 0.006613
+Epoch: 050, Train Loss: 0.006034, Test Loss: 0.006487
+Epoch: 051, Train Loss: 0.005892, Test Loss: 0.006337
+Epoch: 052, Train Loss: 0.005901, Test Loss: 0.006364
+Epoch: 053, Train Loss: 0.005845, Test Loss: 0.006290
+Epoch: 054, Train Loss: 0.005760, Test Loss: 0.006203
+Epoch: 055, Train Loss: 0.005748, Test Loss: 0.006193
+Epoch: 056, Train Loss: 0.005769, Test Loss: 0.006222
+Epoch: 057, Train Loss: 0.005767, Test Loss: 0.006212
+Epoch: 058, Train Loss: 0.005626, Test Loss: 0.006068
+Epoch: 059, Train Loss: 0.005634, Test Loss: 0.006081
+Epoch: 060, Train Loss: 0.005587, Test Loss: 0.006025
+Epoch: 061, Train Loss: 0.005636, Test Loss: 0.006081
+Epoch: 062, Train Loss: 0.005548, Test Loss: 0.005984
+Epoch: 063, Train Loss: 0.005537, Test Loss: 0.005984
+Epoch: 064, Train Loss: 0.005599, Test Loss: 0.006040
+Epoch: 065, Train Loss: 0.005472, Test Loss: 0.005919
+Epoch: 066, Train Loss: 0.005603, Test Loss: 0.006047
+Epoch: 067, Train Loss: 0.005490, Test Loss: 0.005923
+Epoch: 068, Train Loss: 0.005510, Test Loss: 0.005952
+Epoch: 069, Train Loss: 0.005508, Test Loss: 0.005946
+Epoch: 070, Train Loss: 0.005468, Test Loss: 0.005900
+Epoch: 071, Train Loss: 0.005552, Test Loss: 0.005983
+Epoch: 072, Train Loss: 0.005471, Test Loss: 0.005900
+Epoch: 073, Train Loss: 0.005464, Test Loss: 0.005895
+Epoch: 074, Train Loss: 0.005370, Test Loss: 0.005805
+Epoch: 075, Train Loss: 0.005378, Test Loss: 0.005806
+Epoch: 076, Train Loss: 0.005339, Test Loss: 0.005761
+Epoch: 077, Train Loss: 0.005340, Test Loss: 0.005768
+Epoch: 078, Train Loss: 0.005336, Test Loss: 0.005761
+Epoch: 079, Train Loss: 0.005173, Test Loss: 0.005591
+Epoch: 080, Train Loss: 0.006109, Test Loss: 0.006509
+Epoch: 081, Train Loss: 0.005199, Test Loss: 0.005631
+Epoch: 082, Train Loss: 0.005223, Test Loss: 0.005650
+Epoch: 083, Train Loss: 0.005174, Test Loss: 0.005601
+Epoch: 084, Train Loss: 0.005173, Test Loss: 0.005596
+Epoch: 085, Train Loss: 0.005184, Test Loss: 0.005621
+Epoch: 086, Train Loss: 0.005072, Test Loss: 0.005494
+Epoch: 087, Train Loss: 0.005169, Test Loss: 0.005592
+Epoch: 088, Train Loss: 0.005075, Test Loss: 0.005493
+Epoch: 089, Train Loss: 0.005147, Test Loss: 0.005573
+Epoch: 090, Train Loss: 0.005055, Test Loss: 0.005480
+Epoch: 091, Train Loss: 0.005016, Test Loss: 0.005435
+Epoch: 092, Train Loss: 0.004976, Test Loss: 0.005393
+Epoch: 093, Train Loss: 0.004917, Test Loss: 0.005333
+Epoch: 094, Train Loss: 0.004976, Test Loss: 0.005390
+Epoch: 095, Train Loss: 0.004945, Test Loss: 0.005362
+Epoch: 096, Train Loss: 0.004942, Test Loss: 0.005363
+Epoch: 097, Train Loss: 0.004931, Test Loss: 0.005351
+Epoch: 098, Train Loss: 0.004944, Test Loss: 0.005358
+Epoch: 099, Train Loss: 0.004955, Test Loss: 0.005367