|
- import os
- os.environ['TL_BACKEND'] = 'tensorflow'
-
- import numpy as np
- import time
-
- import tensorflow as tf
- import tensorlayerx as tlx
- from tensorlayerx.nn import Module
- from tensorlayerx.nn import Linear, Dropout
-
# Load MNIST data by TensorLayerX: each image is flattened to a 784-d vector
# (shape=(-1, 784)); returns train / validation / test splits as numpy arrays.
X_train, y_train, X_val, y_val, X_test, y_test = tlx.files.load_mnist_dataset(shape=(-1, 784))
-
def generator_train():
    """Yield (image, label) pairs from the training split one sample at a time.

    Raises:
        AssertionError: if the number of images and labels disagree.
    """
    samples, labels = X_train, y_train
    if len(samples) != len(labels):
        raise AssertionError("The length of inputs and targets should be equal")
    yield from zip(samples, labels)
-
# Make Dataset by TensorFlow: wrap the Python generator, shuffle within a
# 128-sample buffer, and group samples into mini-batches of 128.
train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32))
shuffle_buffer_size = 128
batch_size = 128
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.batch(batch_size)
-
-
# Define the network through tensorlayerx
class CustomModel(Module):
    """Three-layer MLP classifier for flattened 28x28 MNIST images.

    Architecture: dropout -> 784->800 linear (ReLU) -> dropout ->
    800->800 linear (ReLU) -> dropout -> 800->10 linear (raw logits).
    """

    def __init__(self):
        super(CustomModel, self).__init__()
        self.dropout1 = Dropout(p=0.2)
        # ReLU added here: without it, linear1 followed by linear2's matmul
        # was two consecutive affine maps, which collapse to a single one.
        self.linear1 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=784)
        self.dropout2 = Dropout(p=0.2)
        self.linear2 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=800)
        self.dropout3 = Dropout(p=0.2)
        # No activation on the output layer: softmax_cross_entropy_with_logits
        # expects unconstrained logits; the previous act=tlx.nn.ReLU clipped
        # all negative logits to zero and hampered training.
        self.linear3 = Linear(out_features=10, in_features=800)

    def forward(self, x):
        """Map a batch of flattened images (batch, 784) to class logits (batch, 10)."""
        z = self.dropout1(x)
        z = self.linear1(z)
        z = self.dropout2(z)
        z = self.linear2(z)
        z = self.dropout3(z)
        out = self.linear3(z)
        return out
-
-
# Instantiate the model and the training hyper-parameters.
MLP = CustomModel()
n_epoch = 2       # number of full passes over the training set
print_freq = 1    # NOTE(review): currently unused in this script — confirm before removing
# The dead `batch_size = 500` reassignment was removed: the dataset was
# already batched with batch_size = 128 when train_ds was built, and
# nothing reads the variable after this point.
train_weights = MLP.trainable_weights  # parameters the optimizer updates
# Define the optimizer through tensorlayerx
optimizer = tlx.optimizers.Adam(lr=0.0001)
-
for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
    start_time = time.time()
    MLP.set_train()  # enable dropout once per epoch (hoisted out of the batch loop)
    ## iterate over the entire training set once (shuffle the data via training)
    for X_batch, y_batch in train_ds:
        with tf.GradientTape() as tape:  # use tf.GradientTape() to record gradient
            ## compute outputs
            _logits = MLP(X_batch)
            ## compute loss and update model
            _loss = tlx.losses.softmax_cross_entropy_with_logits(_logits, y_batch)
        grad = tape.gradient(_loss, train_weights)
        optimizer.apply_gradients(zip(grad, train_weights))
    print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))

    # Report metrics averaged over the FULL training set. The original code
    # used only the last mini-batch (n_iter == 1) with dropout still active,
    # which is a noisy single-batch estimate, not a training-set metric.
    MLP.set_eval()  # disable dropout for evaluation
    train_loss, train_acc, n_iter = 0, 0, 0
    for X_batch, y_batch in train_ds:
        _logits = MLP(X_batch)
        train_loss += tlx.losses.softmax_cross_entropy_with_logits(_logits, y_batch)
        train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
        n_iter += 1
    print(" train loss: {}".format(train_loss / n_iter))
    print(" train acc: {}".format(train_acc / n_iter))
|