|
- import numpy as np
- import torch
- from torch import device, nn
- from sklearn.metrics import mean_squared_error
- # from input_data import *
- # from model_1 import *
- # from model_past import *
-
-
-
- import random
-
- import numpy as np
- import torch
- from tqdm import tqdm
- from torch.autograd import Variable
- from torch.utils.data import Dataset, DataLoader
class ConvNet(nn.Module):
    """Two-layer 3D CNN: maps ``input_dim`` predictor channels to one output channel.

    conv1: input_dim -> 16 channels; with kernel depth equal to the input's
    time depth and zero depth padding it collapses the time dimension.
    conv2: 16 -> 1 channel.

    Note: ``hidden_dim`` is accepted for backward compatibility but is not
    used — the hidden channel count is fixed at 16 (the saved checkpoints
    depend on it, so it must not be re-parameterized).
    """

    def __init__(self, input_dim, hidden_dim, kernel_size1, padding1,
                 kernel_size2, padding2):
        super(ConvNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim  # unused; kept so callers need not change
        self.kernel_size1 = kernel_size1
        self.padding1 = padding1
        self.kernel_size2 = kernel_size2
        self.padding2 = padding2
        self.conv1 = nn.Sequential(nn.Conv3d(in_channels=self.input_dim,
                                             out_channels=16,
                                             kernel_size=kernel_size1,
                                             padding=self.padding1,
                                             ))
        self.conv2 = nn.Sequential(nn.Conv3d(in_channels=16,
                                             out_channels=1,
                                             kernel_size=kernel_size2,
                                             padding=self.padding2,
                                             ))

    def forward(self, x):
        """Forward pass.

        x: tensor of shape (batch, input_dim, depth, height, width);
        returns (batch, 1, depth', height', width') where the primed sizes
        follow from the two convolutions' kernel/padding choices.
        """
        out = self.conv1(x)
        out = self.conv2(out)
        return out
-
def setup_seed(seed):
    """Seed every RNG used by training (stdlib, NumPy, Torch CPU + all CUDA
    devices) and force deterministic cuDNN kernels, for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
-
-
# ---------------------------------------------------------------------------
# Training driver: for every forecast lead time (data_append - 6 days) and
# every random seed, train the 3D CNN on 8 physical predictor fields,
# validate each epoch, and checkpoint the model with the lowest validation
# loss.  Requires CUDA and the dataset at /dataset/.
# ---------------------------------------------------------------------------
year_num = 0          # index of the first usable sample
year_true_num = 3646  # total number of usable samples

for data_append in range(7, 16):     # label offset; lead time = data_append - 6 days
    for seed_num in range(70, 90):   # 20 repetitions with different seeds
        setup_seed(seed_num)

        # ---- load raw fields ------------------------------------------------
        # NOTE(review): spatial grid assumed 6 x 27 with a 7-step time window;
        # confirm against the data-preparation script.
        data = np.load(r'/dataset/all_variables_ssh_wind_openi.npz')
        sst = data['sst'][year_num:].reshape(-1, 1, 6, 27)
        print(data.files)
        dlwrf1 = data['dlwrf'][year_num:]
        dswrf1 = data['dswrf'][year_num:]
        lhtfl1 = data['lhtfl'][year_num:]
        sst_label = data['sst_label'][year_num:]
        mld1 = data['mld'][year_num:]
        shtfl1 = data['shtfl'][year_num:]
        sst_feature = data['sst_feature'][year_num:]
        ulwrf1 = data['ulwrf'][year_num:]
        uswrf1 = data['uswrf'][year_num:]
        vflx1 = data['vflx'][year_num:]
        T_d1 = data['T_d'][year_num:]
        v_d1 = data['v_d'][year_num:]
        u_d1 = data['u_d'][year_num:]
        uflx1 = data['uflx'][year_num:]
        print(uflx1.shape)

        ssh1 = data['ssh'][year_num:]
        ssha1 = data['ssha'][year_num:]
        eastward_wind1 = data['eastward_wind'][year_num:]
        northward_wind1 = data['northward_wind'][year_num:]
        surface_downward_northward_stress1 = data['surface_downward_northward_stress'][year_num:]
        surface_downward_eastward_stress1 = data['surface_downward_eastward_stress'][year_num:]

        # Net radiative components of the surface heat budget.
        Qshortwave = dswrf1 - uswrf1
        Qlongwave = dlwrf1 - ulwrf1
        train_size = 2432
        valid_size = 2560  # samples [train_size, valid_size) validate; the rest test

        def _split(arr):
            """Reshape a raw field to (N, 1, 7, 6, 27) and return
            (full, train, valid, test) tensors split at the fixed boundaries."""
            t = torch.Tensor(np.asarray(arr).reshape(-1, 1, 7, 6, 27))
            return (t,
                    t[0:train_size],
                    t[train_size:valid_size],
                    t[valid_size:year_true_num])

        ssh, ssh_train, ssh_valid, ssh_test = _split(ssh1)
        ssha, ssha_train, ssha_valid, ssha_test = _split(ssha1)
        eastward_wind, eastward_wind_train, eastward_wind_valid, eastward_wind_test = _split(eastward_wind1)
        northward_wind, northward_wind_train, northward_wind_valid, northward_wind_test = _split(northward_wind1)
        (surface_downward_northward_stress,
         surface_downward_northward_stress_train,
         surface_downward_northward_stress_valid,
         surface_downward_northward_stress_test) = _split(surface_downward_northward_stress1)
        (surface_downward_eastward_stress,
         surface_downward_eastward_stress_train,
         surface_downward_eastward_stress_valid,
         surface_downward_eastward_stress_test) = _split(surface_downward_eastward_stress1)

        u, u_train, u_valid, u_test = _split(uflx1)
        v, v_train, v_valid, v_test = _split(vflx1)

        # Net surface heat flux = net shortwave + net longwave + latent + sensible.
        Qnet_raw = Qshortwave + Qlongwave + lhtfl1 + shtfl1
        Qnet, Qnet_train, Qnet_valid, Qnet_test = _split(Qnet_raw)
        print('Qnet.shape:{}'.format(Qnet.shape))

        mld, mld_train, mld_valid, mld_test = _split(mld1)
        sst1, sst1_train, sst1_valid, sst1_test = _split(sst_feature)
        T_d, T_d_train, T_d_valid, T_d_test = _split(T_d1)
        u_d, u_d_train, u_d_valid, u_d_test = _split(u_d1)
        v_d, v_d_train, v_d_valid, v_d_test = _split(v_d1)

        # 8 predictor channels, concatenated in the order the model expects.
        train_data = torch.cat((sst1_train, Qnet_train, mld_train, u_train, v_train,
                                T_d_train, v_d_train,
                                surface_downward_eastward_stress_train), dim=1)
        print('train_data.shape:{}'.format(train_data.shape))

        valid_data = torch.cat((sst1_valid, Qnet_valid, mld_valid, u_valid, v_valid,
                                T_d_valid, v_d_valid,
                                surface_downward_eastward_stress_valid), dim=1)

        test_data = torch.cat((sst1_test, Qnet_test, mld_test, u_test, v_test,
                               T_d_test, v_d_test,
                               surface_downward_eastward_stress_test), dim=1)

        # Labels: SST field `data_append` steps after each feature window.
        train_label = sst[data_append:train_size + data_append]
        valid_label = sst[train_size + data_append:valid_size + data_append]
        test_label = sst[valid_size + data_append:year_true_num]
        print('test_label.shape:{}'.format(test_label.shape))

        train_label = torch.Tensor(train_label)
        valid_label = torch.Tensor(valid_label)
        test_label = torch.Tensor(test_label)
        print('train_label.shape:{}'.format(train_label.shape))
        test_label11 = test_label
        valid_label11 = valid_label

        # ---- data pipeline --------------------------------------------------
        class MyDataset(Dataset):
            """Minimal (features, label) tensor-pair dataset."""

            def __init__(self, data, label):
                self.data = torch.Tensor(data)
                self.label = torch.Tensor(label)

            def __len__(self):
                return len(self.label)

            def __getitem__(self, idx):
                return self.data[idx], self.label[idx]

        batch_size1 = 32
        batch_size2 = 32
        batch_size3 = 3000
        print(train_data.shape)
        print(train_label.shape)

        trainset = MyDataset(train_data, train_label)
        trainloader = DataLoader(trainset, batch_size=batch_size1, shuffle=True,
                                 drop_last=False, pin_memory=True, num_workers=4)

        # FIX: the validation loader must NOT shuffle — `preds` is filled by
        # batch index below and compared element-wise against `valid_label`,
        # so a shuffled loader made the reported RMSE meaningless.
        validset = MyDataset(valid_data, valid_label)
        validloader = DataLoader(validset, batch_size=batch_size2, shuffle=False,
                                 drop_last=False, pin_memory=True, num_workers=4)

        testset = MyDataset(test_data, test_label)
        testloader = DataLoader(testset, batch_size=batch_size3, shuffle=False,
                                drop_last=False, pin_memory=True, num_workers=4)

        model_weights1 = '/model/last_cnn_hidd16_epo300_with_east_wind_stress_lr1e-3_lay2_e{}_ahead_{}day_model_weights.pth'.format(seed_num, data_append - 6)

        print('-----------------------train_black_att_3dcnn_convlstm--------------------------')

        # Kernel depth 7 with zero depth padding collapses the 7-step time
        # window in conv1.  (Duplicate `model = model.cuda()` removed.)
        model = ConvNet(input_dim=8, hidden_dim=1,
                        kernel_size1=(7, 3, 3), padding1=(0, 1, 1),
                        kernel_size2=(1, 3, 3), padding2=(0, 1, 1)).cuda()

        criterion = nn.MSELoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

        epochs = 300
        train_losses, valid_losses = [], []
        best_score1 = float('inf')  # lowest validation loss seen so far
        # Validation set holds valid_size - train_size = 128 samples.
        preds = np.expand_dims(np.zeros((128, 6, 27)), axis=1)
        print(preds.shape)
        sores = []

        def rmse(y_true, y_preds):
            """Root-mean-square error between flattened truth and prediction."""
            return np.sqrt(mean_squared_error(y_pred=y_preds, y_true=y_true))

        for epoch in range(epochs):
            print('Epoch: {}/{}'.format(epoch + 1, epochs))

            # ---- training ---------------------------------------------------
            model.train()
            losses = 0.0
            for batch_data, label in tqdm(trainloader):
                batch_data = batch_data.cuda()
                label = label.cuda()
                optimizer.zero_grad()
                pred = model(batch_data.reshape(-1, 8, 7, 6, 27))
                pred = pred.reshape(-1, 1, 6, 27)
                loss = criterion(pred, label)
                # FIX: accumulate the detached scalar so each batch's autograd
                # graph is freed instead of being kept alive all epoch.
                losses += loss.item()
                loss.backward()
                optimizer.step()
            train_loss = losses / len(trainloader)
            train_losses.append(train_loss)
            print('Training Loss: {:.3f}'.format(train_loss))

            # ---- validation -------------------------------------------------
            # FIX: eval mode re-enabled (was commented out); the stray
            # optimizer.zero_grad() inside no_grad removed; and the loss is
            # computed against this batch's own labels — the original reused
            # the stale `label` tensor from the last training batch.
            model.eval()
            losses = 0.0
            with torch.no_grad():
                for i, (vdata, vlabel) in tqdm(enumerate(validloader)):
                    vdata = vdata.cuda()
                    vlabel = vlabel.cuda()
                    pred = model(vdata.reshape(-1, 8, 7, 6, 27))
                    pred = pred.reshape(-1, 1, 6, 27)
                    loss = criterion(pred, vlabel)
                    losses += loss.item()
                    preds[i * batch_size2:(i + 1) * batch_size2] = pred.cpu()
            valid_loss = losses / len(validloader)
            valid_losses.append(valid_loss)

            valid_label1 = valid_label.reshape(-1, 1)
            preds1 = preds.reshape(-1, 1)
            print('valid_label1.shape:{}'.format(valid_label1.shape))
            print('preds.shape:{}0'.format(preds.shape))

            s = rmse(valid_label1, preds1)
            sores.append(s)
            print('Score: {:.3f}'.format(s))

            # Checkpoint whenever the validation loss improves.
            if valid_loss < best_score1:
                best_score1 = valid_loss
                checkpoint = {'best_score': valid_loss,
                              'state_dict': model.state_dict()}
                torch.save(checkpoint, model_weights1)
                torch.save(model.state_dict(),
                           '/model/last_cnn_hidd16_model_with_east_wind_stress_epo300_layer2_lr1e-3_ahead_{}_day_e{}.pt'.format(data_append - 6, seed_num))

        print(sores)
        # FIX: report the tracked best validation loss; the original printed a
        # `best_score` variable that was never updated after initialization.
        print(best_score1)
        print(s)
-
-
-
-
-
|