# ConvLSTM Cell
import random
import threading

import numpy as np
import torch
from tqdm import tqdm
from sklearn.metrics import mean_squared_error
from torch import nn
from torch.utils.data import Dataset, DataLoader


class ConvLSTMCell(nn.Module):
    def __init__(self, input_dim, hidden_dim, kernel_size):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        pad = kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[2] // 2
        # One convolution computes W_x * X_t + W_h * H_{t-1} for all four gates at once.
        self.conv = nn.Conv3d(in_channels=input_dim + hidden_dim,
                              out_channels=4 * hidden_dim,
                              kernel_size=kernel_size,
                              padding=pad)

    def initialize(self, inputs):
        # Allocate zero-initialized states matching the input's batch and
        # spatial dimensions, on the same device as the input.
        N, _, D, H, W = inputs.size()
        device = inputs.device
        # Hidden state H_t
        self.hidden_state = torch.zeros(N, self.hidden_dim, D, H, W, device=device)
        # Cell state C_t
        self.cell_state = torch.zeros(N, self.hidden_dim, D, H, W, device=device)
        # Memory state M_t (allocated for symmetry; not used by this plain ConvLSTM cell)
        self.memory_state = torch.zeros(N, self.hidden_dim, D, H, W, device=device)

    def forward(self, inputs, first_step=False):
        # On the first time step, initialize H_t, C_t and M_t.
        if first_step:
            self.initialize(inputs)

        # ConvLSTM update.
        # Concatenate X_t and H_{t-1} along the channel axis:
        # (N, input_dim + hidden_dim, D, H, W).
        combined = torch.cat([inputs, self.hidden_state], dim=1)

        # Apply the gate convolution.
        combined_conv = self.conv(combined)

        # Split into the four gates i_t, f_t, o_t, g_t.
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)
        g = torch.tanh(cc_g)
        # Cell state: c_t = f_t * c_{t-1} + i_t * g_t
        self.cell_state = f * self.cell_state + i * g
        # Hidden state: h_t = o_t * tanh(c_t)
        self.hidden_state = o * torch.tanh(self.cell_state)

        return self.hidden_state
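
# Minimal sanity check for ConvLSTMCell (a sketch: the small shapes are
# arbitrary and just exercise a single step on whatever device the input uses).
_cell = ConvLSTMCell(input_dim=1, hidden_dim=4, kernel_size=(3, 3, 3))
_h = _cell(torch.randn(2, 1, 3, 8, 10), first_step=True)  # (N, C, D, H, W)
assert _h.shape == (2, 4, 3, 8, 10)  # channels -> hidden_dim; D, H, W preserved
del _cell, _h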

# Build the 3D-CNN + ConvLSTM model.
# Input: (N, channels, days, H, W), here (batch, 46, 7, 40, 80).
class ConvLSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim, kernel_size):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_layers = len(hidden_dim)
        # 3D convolution collapsing the 46 stacked variables/depth levels to 7
        # channels. Conv3d input format: (batch_size, channel, depth, height, width).
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv3d(46, 7, 3, stride=1, padding=1),
        )

        # Stack ConvLSTM cells; each layer's input size is the previous layer's
        # hidden size (except the first, which sees `input_dim` channels).
        layers = []
        for i in range(self.num_layers):
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
            layers.append(ConvLSTMCell(input_dim=cur_input_dim,
                                       hidden_dim=self.hidden_dim[i],
                                       kernel_size=kernel_size))
        self.layers = nn.ModuleList(layers)

        # 1x1x1 convolution mapping the last hidden state to one output channel.
        self.conv_output = nn.Conv3d(self.hidden_dim[-1], 1, kernel_size=1)

    def forward(self, input_x, input_frames=7, future_frames=1, output_frames=7,
                teacher_forcing=False, scheduled_sampling_ratio=0, train=True):
        # Collapse the 46 input channels to 7, then swap channel and depth so
        # the day index sits on dim 1: (N, C, D, H, W) -> (N, D, C, H, W).
        x = self.conv1(input_x)
        input_x = x.permute(0, 2, 1, 3, 4).contiguous()

        total_steps = input_frames + future_frames - 1
        outputs = [None] * total_steps

        # For every time step:
        for t in range(total_steps):
            # Within the input window, feed the observed sample X_t.
            if t < input_frames:
                input_ = input_x[:, t].unsqueeze(1)  # (N, 1, C, H, W)
            # Without teacher forcing, feed the previous step's prediction.
            elif not teacher_forcing:
                input_ = outputs[t - 1]
            # With teacher forcing, the true label would be mixed in with some
            # probability (not enabled in this setup):
            # else:
            #     mask = teacher_forcing_mask[:, t - input_frames].float().to(device)
            #     input_ = input_x[:, t].to(device) * mask + outputs[t - 1] * (1 - mask)
            first_step = (t == 0)
            input_ = input_.float()

            # Pass the current input through the stacked ConvLSTM layers,
            # keeping everything as tensors so gradients flow across steps.
            for layer_idx in range(self.num_layers):
                input_ = self.layers[layer_idx](input_, first_step=first_step)
            # Record the output of each time step.
            if train or (t >= (input_frames - 1)):
                outputs[t] = self.conv_output(input_)  # (N, 1, C, H, W)

        outputs = [x for x in outputs if x is not None]

        # Check the length of the output sequence.
        if train:
            assert len(outputs) == output_frames
        else:
            assert len(outputs) == future_frames

        # Stack the per-step SST predictions: (N, T, 1, C, H, W) -> (N, T, C, H, W).
        outputs = torch.stack(outputs, dim=1)[:, :, 0]
        outputs1 = outputs[:, -future_frames:]
        # (Averaging the SST prediction over the Nino3.4 box and a 3-step window
        # would give a Nino3.4 index series; kept for reference, unused here.)
        # nino_pred = outputs[:, -future_frames:, 10:13, 19:30].mean(dim=[2, 3])
        # nino_pred = nino_pred.unfold(dimension=1, size=3, step=1).mean(dim=2)
        # Squeeze the frame dimension (not the batch) -> (N, C, H, W).
        return outputs1.squeeze(1)
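
# Smoke test for the full model (a sketch: a tiny batch with the same channel
# and day counts as the real data, but a small spatial grid to stay cheap).
_m = ConvLSTM(input_dim=1, hidden_dim=(16, 16, 16), kernel_size=(3, 3, 3))
_y = _m(torch.randn(2, 46, 7, 8, 10))
assert _y.shape == (2, 7, 8, 10)  # last predicted frame: (N, depth levels, H, W)
del _m, _y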

data = np.load(r'/dataset/7day_for_Nday_data_09_17_atlantic_openi_ICDM_area_last.npz')
print(data.files)  # ['hycom_temp', 'slfh', 'sshf', 'ssr', 'str', 'mld', 'u', 'v', 'T_d', 'u_d', 'v_d', 'xx', 'yy', 'analysis_temp', 'dT_dt', 'dT_dx', 'dT_dy']

# Crop every field to the first 2634 samples and a 40 x 80 spatial window.
hycom_temp = data['hycom_temp'][:2634, :, :, :40, :80]        # raw: (3281, 7, 7, 41, 201)
slfh = data['slfh'][:2634, :, :40, :80]                       # raw: (3281, 7, 41, 201)
sshf = data['sshf'][:2634, :, :40, :80]
ssr = data['ssr'][:2634, :, :40, :80]
str_flux = data['str'][:2634, :, :40, :80]                    # renamed: `str` would shadow the builtin
mld = data['mld'][:2634, :, :40, :80]
analysis_temp = data['analysis_temp'][:2634, :, :, :40, :80]  # raw: (3281, 7, 7, 41, 201)
u = data['u'][:2634, :, :, :40, :80]
v = data['v'][:2634, :, :, :40, :80]
T_d = data['T_d'][:2634, :, :40, :80]
u_d = data['u_d'][:2634, :, :40, :80]
v_d = data['v_d'][:2634, :, :40, :80]
dT_dt = data['dT_dt'][:2634, :, :, :40, :80]
dT_dx = data['dT_dx'][:2634, :, :, :40, :80]
dT_dy = data['dT_dy'][:2634, :, :, :40, :80]

analysis_temp = analysis_temp.transpose(0, 2, 1, 3, 4)  # swap dims 1 and 2 to match the model output layout

train_size = 1952  # first ~60% of samples for training
valid_size = 2624  # through ~80%: the middle ~20% validates; the last ~20% would be the test set

# Net surface heat flux. Dividing by 86400 s assumes the four flux fields are
# stored as daily accumulations (J/m^2), converting them to W/m^2.
Q_net = (slfh + sshf + ssr + str_flux) / 86400

# Convert to tensors and arrange each predictor as (sample, channels, 7 days, H, W).
hycom_temp = torch.Tensor(hycom_temp).permute(0, 2, 1, 3, 4).reshape(-1, 7, 7, 40, 80)
hycom_temp_train = hycom_temp[0:train_size]
hycom_temp_valid = hycom_temp[train_size:valid_size]

Q_net = torch.Tensor(Q_net).reshape(-1, 1, 7, 40, 80)
Q_net_train = Q_net[0:train_size]
Q_net_valid = Q_net[train_size:valid_size]

mld = torch.Tensor(mld).reshape(-1, 1, 7, 40, 80)
mld_train = mld[0:train_size]
mld_valid = mld[train_size:valid_size]

analysis_temp = torch.Tensor(analysis_temp)  # (2634, 7, 7, 40, 80)

u = torch.Tensor(u).permute(0, 2, 1, 3, 4).reshape(-1, 7, 7, 40, 80)
u_train = u[0:train_size]
u_valid = u[train_size:valid_size]

v = torch.Tensor(v).permute(0, 2, 1, 3, 4).reshape(-1, 7, 7, 40, 80)
v_train = v[0:train_size]
v_valid = v[train_size:valid_size]

T_d = torch.Tensor(T_d).reshape(-1, 1, 7, 40, 80)
T_d_train = T_d[0:train_size]
T_d_valid = T_d[train_size:valid_size]

u_d = torch.Tensor(u_d).reshape(-1, 1, 7, 40, 80)
u_d_train = u_d[0:train_size]
u_d_valid = u_d[train_size:valid_size]

v_d = torch.Tensor(v_d).reshape(-1, 1, 7, 40, 80)
v_d_train = v_d[0:train_size]
v_d_valid = v_d[train_size:valid_size]

# dT_dt has one fewer level along axis 2 than the other 5-D fields, so pad it
# with zeros up to 7 (presumably the time difference drops one entry).
dT_dt = np.concatenate((dT_dt, np.zeros((2634, 7, 1, 40, 80))), axis=2)
dT_dt = torch.Tensor(dT_dt)
dT_dt_train = dT_dt[0:train_size]
dT_dt_valid = dT_dt[train_size:valid_size]

dT_dx = torch.Tensor(dT_dx)
dT_dx_train = dT_dx[0:train_size]
dT_dx_valid = dT_dx[train_size:valid_size]

dT_dy = torch.Tensor(dT_dy)
dT_dy_train = dT_dy[0:train_size]
dT_dy_valid = dT_dy[train_size:valid_size]

# Concatenate all predictors along dim 1: sample index first, then variables x
# depth, the 7-day sequence, lat, lon. (u_d_train is prepared above but not
# included, which keeps the channel count at 46.)
train_data = torch.cat((hycom_temp_train, Q_net_train, mld_train, u_train, v_train,
                        T_d_train, v_d_train, dT_dt_train, dT_dx_train, dT_dy_train),
                       dim=1)  # torch.Size([1952, 46, 7, 40, 80])

valid_data = torch.cat((hycom_temp_valid, Q_net_valid, mld_valid, u_valid, v_valid,
                        T_d_valid, v_d_valid, dT_dt_valid, dT_dx_valid, dT_dy_valid),
                       dim=1)  # torch.Size([672, 46, 7, 40, 80])
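
# Shape guard (a sketch): the stacked predictors must expose 46 channels,
# matching conv1's in_channels in ConvLSTM.
assert train_data.shape[1] == 46 and valid_data.shape[1] == 46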

# Labels: analysis temperature shifted 7 days ahead. Note that, unlike earlier
# experiments, the labels also went through the 7day_for_Nday windowing, so an
# N-day-ahead forecast only requires changing N in that preprocessing step.
train_label = analysis_temp[7:train_size + 7]        # (1952, 7, 7, 40, 80)
train_label = train_label[:, :, 0, :, :]             # -> (1952, 7, 40, 80)

valid_label = analysis_temp[train_size + 7:valid_size + 7]
valid_label = valid_label[:, :, 0, :, :]             # -> (672, 7, 40, 80)
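
# Label shape guard (a sketch), matching the shapes printed in the original run.
assert tuple(train_label.shape) == (1952, 7, 40, 80)
assert tuple(valid_label.shape) == (672, 7, 40, 80)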

class MyDataset(Dataset):
    """Wraps paired predictor/label tensors for use with a DataLoader."""

    def __init__(self, data, label):
        self.data = torch.Tensor(data)
        self.label = torch.Tensor(label)

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        return self.data[idx], self.label[idx]
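
# Quick check of the Dataset wrapper (a sketch, on a 2-sample slice so the
# copy stays small): each item pairs a (46, 7, 40, 80) predictor stack with a
# (7, 40, 80) temperature label.
_x0, _y0 = MyDataset(valid_data[:2], valid_label[:2])[0]
assert _x0.shape == (46, 7, 40, 80) and _y0.shape == (7, 40, 80)
del _x0, _y0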

batch_size1 = 32    # training batch size
batch_size2 = 32    # validation batch size
batch_size3 = 3000  # (unused here)

# Grid over runs (ii) and training lengths (epoch111).
for ii in range(1, 6):
    for epoch111 in range(100, 600, 100):

        trainset = MyDataset(train_data, train_label)
        trainloader = DataLoader(trainset, batch_size=batch_size1, shuffle=True,
                                 drop_last=False, pin_memory=True,
                                 num_workers=threading.active_count())
        # Validation is NOT shuffled, so that `preds` below stays aligned with
        # `valid_label` when computing the RMSE score.
        validset = MyDataset(valid_data, valid_label)
        validloader = DataLoader(validset, batch_size=batch_size2, shuffle=False,
                                 drop_last=False, pin_memory=True,
                                 num_workers=threading.active_count())

        # Helper to cycle a DataLoader endlessly (not used by the loop below).
        def get_dataloader_generator(loader):
            while True:
                for data, label in loader:
                    yield data, label
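
        # Usage sketch for the helper above (kept commented out, since the
        # training loop below iterates the loaders directly):
        # batches = get_dataloader_generator(trainloader)
        # data, label = next(batches)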

        print('-----------------------train_black_cnn----------------------------')

        def setup_seed(seed):
            # Make runs reproducible across torch, numpy and random.
            torch.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
            np.random.seed(seed)
            random.seed(seed)
            torch.backends.cudnn.deterministic = True
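
        # Seed this run (a sketch: deriving the seed from the run index ii is
        # an assumption, made so repeated runs are distinct yet reproducible).
        setup_seed(ii)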

        model_weights1 = '/model/convlstm_hidd16_epo{}_lr1e-3_lay3_kel3_e{}_black_ahead_8_day_model_weights.pth'.format(epoch111, ii)

        print('-----------------------train_black_att_3dcnn_convlstm--------------------------')
        # Number of input features per ConvLSTM step
        input_dim = 1
        # Hidden units per layer
        hidden_dim = (16, 16, 16)
        # Convolution kernel size
        kernel_size = (3, 3, 3)

        model = ConvLSTM(input_dim, hidden_dim, kernel_size).cuda()
        criterion = nn.MSELoss()
        # Optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

        epochs = epoch111
        train_losses, valid_losses = [], []
        best_score1 = float('inf')
        preds = np.zeros((672, 7, 40, 80))  # one slot per validation sample
        scores = []

        def rmse(y_true, y_preds):
            return np.sqrt(mean_squared_error(y_true=y_true, y_pred=y_preds))
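
        # Sanity check for rmse (a sketch): a constant error of 2 gives RMSE 2.
        assert rmse(np.zeros(4), np.full(4, 2.0)) == 2.0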

        for epoch in range(epochs):
            print('Epoch: {}/{}'.format(epoch + 1, epochs))
            # Training
            model.train()
            losses = 0
            for data, label in tqdm(trainloader):
                data = data.cuda()
                label = label.cuda()

                optimizer.zero_grad()
                out = model(data)
                loss = criterion(out, label)
                # Accumulate as a float so each step's graph can be freed.
                losses += loss.item()
                # Backpropagation
                loss.backward()
                optimizer.step()
            train_loss = losses / len(trainloader)
            train_losses.append(train_loss)
            print('Training Loss: {:.3f}'.format(train_loss))

            # Validation
            model.eval()
            losses = 0

            with torch.no_grad():
                for d, (data, label) in enumerate(tqdm(validloader)):
                    data = data.cuda()
                    label = label.cuda()

                    pred = model(data, train=False)
                    loss = criterion(pred, label)
                    losses += loss.item()

                    # validloader is not shuffled, so batch d fills slots
                    # [d * batch_size2, (d + 1) * batch_size2) in order.
                    preds[d * batch_size2:(d + 1) * batch_size2] = pred.cpu().numpy()
            valid_loss = losses / len(validloader)
            valid_losses.append(valid_loss)
            print('Validation Loss: {:.3f}'.format(valid_loss))

            valid_label1 = valid_label.reshape(-1, 1).numpy()
            preds1 = preds.reshape(-1, 1)
            s = rmse(valid_label1, preds1)
            scores.append(s)
            print('Score: {:.3f}'.format(s))
            # Save the best model weights (lowest validation loss so far).
            if valid_loss < best_score1:
                best_score1 = valid_loss
                checkpoint = {'best_score': valid_loss,
                              'state_dict': model.state_dict()}
                torch.save(checkpoint, model_weights1)
                torch.save(model.state_dict(),
                           '/model/convlstm_hidd16_model_epo{}_lr1e-3_layer3_ahead_8_day_e{}.pt'.format(epoch111, ii))

        print(scores)
        print('best_score1: {}'.format(best_score1))
        print('final score: {}'.format(s))
        print('valid_losses: {}'.format(valid_losses))