- from __future__ import division
- import numpy as np
- import keras
- from keras import layers
- from keras import backend as K
- from keras.models import Model
- from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose, ConvLSTM2D, BatchNormalization, Activation, Reshape, Dropout, Dense, GlobalAveragePooling2D, Multiply
- from keras.optimizers import Adam
-
-
-
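- # BCDU-Net with a three-step densely connected bottleneck (D3): each skip
- # connection stacks the encoder and decoder feature maps along a new time axis
- # and fuses them with a ConvLSTM2D before the decoder convolutions.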
- def BCDU_net_D3(input_size = (256,256,1)):
- N = input_size[0]
- inputs = Input(input_size)
- conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
- conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
-
- pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
- conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
- conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
- pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
- conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
- conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
- drop3 = Dropout(0.5)(conv3)
- pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
- # D1: first bottleneck conv block
- conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
- conv4_1 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
- drop4_1 = Dropout(0.5)(conv4_1)
- # D2: second bottleneck conv block, stacked on D1
- conv4_2 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(drop4_1)
- conv4_2 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4_2)
- conv4_2 = Dropout(0.5)(conv4_2)
- # D3: third bottleneck conv block on the concatenation of D2 and D1
- merge_dense = concatenate([conv4_2,drop4_1], axis = 3)
- conv4_3 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge_dense)
- conv4_3 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4_3)
- drop4_3 = Dropout(0.5)(conv4_3)
-
- up6 = Conv2DTranspose(256, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(drop4_3)
- up6 = BatchNormalization(axis=3)(up6)
- up6 = Activation('relu')(up6)
-
- x1 = Reshape(target_shape=(1, np.int32(N/4), np.int32(N/4), 256))(drop3)
- x2 = Reshape(target_shape=(1, np.int32(N/4), np.int32(N/4), 256))(up6)
- merge6 = concatenate([x1,x2], axis = 1)
- merge6 = ConvLSTM2D(filters = 128, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge6)
-
- conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
- conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
-
- up7 = Conv2DTranspose(128, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(conv6)
- up7 = BatchNormalization(axis=3)(up7)
- up7 = Activation('relu')(up7)
-
- x1 = Reshape(target_shape=(1, np.int32(N/2), np.int32(N/2), 128))(conv2)
- x2 = Reshape(target_shape=(1, np.int32(N/2), np.int32(N/2), 128))(up7)
- merge7 = concatenate([x1,x2], axis = 1)
- merge7 = ConvLSTM2D(filters = 64, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge7)
-
- conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
- conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
-
- up8 = Conv2DTranspose(64, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(conv7)
- up8 = BatchNormalization(axis=3)(up8)
- up8 = Activation('relu')(up8)
-
- x1 = Reshape(target_shape=(1, N, N, 64))(conv1)
- x2 = Reshape(target_shape=(1, N, N, 64))(up8)
- merge8 = concatenate([x1,x2], axis = 1)
- merge8 = ConvLSTM2D(filters = 32, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge8)
-
- conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
- conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
- conv8 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
- conv9 = Conv2D(1, 1, activation = 'sigmoid')(conv8)
-
- model = Model(inputs = inputs, outputs = conv9)
- model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
- return model
-
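- # BCDU-Net with a single bottleneck block (D1); the ConvLSTM skip fusion and
- # decoder are the same as in BCDU_net_D3.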
- def BCDU_net_D1(input_size = (256,256,1)):
- N = input_size[0]
- inputs = Input(input_size)
- conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
- conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
-
- pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
- conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
- conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
- pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
- conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
- conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
- drop3 = Dropout(0.5)(conv3)
- pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
- # D1
- conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
- conv4_1 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
- drop4_1 = Dropout(0.5)(conv4_1)
-
- up6 = Conv2DTranspose(256, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(drop4_1)
- up6 = BatchNormalization(axis=3)(up6)
- up6 = Activation('relu')(up6)
-
- x1 = Reshape(target_shape=(1, np.int32(N/4), np.int32(N/4), 256))(drop3)
- x2 = Reshape(target_shape=(1, np.int32(N/4), np.int32(N/4), 256))(up6)
- merge6 = concatenate([x1,x2], axis = 1)
- merge6 = ConvLSTM2D(filters = 128, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge6)
-
- conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
- conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
-
- up7 = Conv2DTranspose(128, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(conv6)
- up7 = BatchNormalization(axis=3)(up7)
- up7 = Activation('relu')(up7)
-
- x1 = Reshape(target_shape=(1, np.int32(N/2), np.int32(N/2), 128))(conv2)
- x2 = Reshape(target_shape=(1, np.int32(N/2), np.int32(N/2), 128))(up7)
- merge7 = concatenate([x1,x2], axis = 1)
- merge7 = ConvLSTM2D(filters = 64, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge7)
-
- conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
- conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
-
- up8 = Conv2DTranspose(64, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(conv7)
- up8 = BatchNormalization(axis=3)(up8)
- up8 = Activation('relu')(up8)
-
- x1 = Reshape(target_shape=(1, N, N, 64))(conv1)
- x2 = Reshape(target_shape=(1, N, N, 64))(up8)
- merge8 = concatenate([x1,x2], axis = 1)
- merge8 = ConvLSTM2D(filters = 32, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge8)
-
- conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
- conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
- conv8 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
- conv9 = Conv2D(1, 1, activation = 'sigmoid')(conv8)
-
- model = Model(inputs = inputs, outputs = conv9)
- model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
- return model
-
-
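- # Plain U-Net baseline: upsampling + channel-wise concatenation skips, no
- # ConvLSTM fusion or dense bottleneck.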
- def unet(input_size = (256,256,1)):
- inputs = Input(input_size)
- conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
- conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
- pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
- conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
- conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
- pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
- conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
- conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
- drop3 = Dropout(0.5)(conv3)
- pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
- conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
- conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
- drop4 = Dropout(0.5)(conv4)
-
- up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop4))
- merge6 = concatenate([drop3,up6], axis = 3)
- conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
- conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
-
- up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
- merge7 = concatenate([conv2,up7], axis = 3)
- conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
- conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
-
- up8 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
- merge8 = concatenate([conv1,up8], axis = 3)
- conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
- conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
- conv8 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
- conv9 = Conv2D(1, 1, activation = 'sigmoid')(conv8)
-
- model = Model(inputs = inputs, outputs = conv9)
-
- model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
-
- return model
-
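- # Squeeze-and-Excitation block: global average pooling produces per-channel
- # statistics, two Dense layers compute a channel gate, and the input feature
- # map is rescaled channel-wise.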
- def SqueezeExcite(x, ratio=16):
- nb_chan = K.int_shape(x)[-1]
-
- y = GlobalAveragePooling2D()(x)
- y = Dense(nb_chan // ratio, activation='relu')(y)
- y = Dense(nb_chan, activation='sigmoid')(y)
-
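- # Keras's Multiply layer broadcasts the (batch, channels) gate over the
- # spatial dimensions, equivalent to reshaping it to (1, 1, channels) first.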
- y = Multiply()([x, y])
- return y
-
- WEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'
- 'releases/download/v0.1/'
- 'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
-
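- # SEDU-Net: VGG16-style encoder (layer names match the pretrained VGG16
- # weights loaded below), dense bottleneck as in BCDU_net_D3, ConvLSTM skip
- # fusion, and Squeeze-and-Excitation blocks in the decoder.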
- def SEDU_Net_D3(input_size = (256,256,1), learning_rate = 1e-4):
- img_input = Input(input_size)
- N = input_size[0]
- # Block 1
- x = layers.Conv2D(64, (3, 3),
- activation='relu',
- padding='same',
- name='block1_conv1')(img_input)
- conv1 = layers.Conv2D(64, (3, 3),
- activation='relu',
- padding='same',
- name='block1_conv2')(x)
- pool1 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(conv1)
-
- # Block 2
- x = layers.Conv2D(128, (3, 3),
- activation='relu',
- padding='same',
- name='block2_conv1')(pool1)
- conv2 = layers.Conv2D(128, (3, 3),
- activation='relu',
- padding='same',
- name='block2_conv2')(x)
- pool2 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(conv2)
-
- # Block 3
- x = layers.Conv2D(256, (3, 3),
- activation='relu',
- padding='same',
- name='block3_conv1')(pool2)
- x = layers.Conv2D(256, (3, 3),
- activation='relu',
- padding='same',
- name='block3_conv2')(x)
- conv3 = layers.Conv2D(256, (3, 3),
- activation='relu',
- padding='same',
- name='block3_conv3')(x)
- pool3 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(conv3)
- # Block 4
- x = layers.Conv2D(512, (3, 3),
- activation='relu',
- padding='same',
- name='block4_conv1')(pool3)
- x = layers.Conv2D(512, (3, 3),
- activation='relu',
- padding='same',
- name='block4_conv2')(x)
- # D1
- drop4_1 = layers.Conv2D(512, (3, 3),
- activation='relu',
- padding='same',
- name='block4_conv3')(x)
- # D2
- conv4_2 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(drop4_1)
- conv4_2 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4_2)
- conv4_2 = Dropout(0.5)(conv4_2)
- # D3
- merge_dense = concatenate([conv4_2,drop4_1], axis = 3)
- conv4_3 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge_dense)
- conv4_3 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4_3)
- drop4_3 = Dropout(0.5)(conv4_3)
-
- up6 = Conv2DTranspose(256, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(drop4_3)
- up6 = SqueezeExcite(up6, ratio=16)
- up6 = BatchNormalization(axis=3)(up6)
- up6 = Activation('relu')(up6)
-
- x1 = Reshape(target_shape=(1, np.int32(N/4), np.int32(N/4), 256))(conv3)
- x2 = Reshape(target_shape=(1, np.int32(N/4), np.int32(N/4), 256))(up6)
- merge6 = concatenate([x1,x2], axis = 1)
- merge6 = ConvLSTM2D(filters = 256, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge6)
-
- conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
- conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
- conv6 = SqueezeExcite(conv6, ratio=16)
-
- up7 = Conv2DTranspose(128, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(conv6)
- up7 = SqueezeExcite(up7, ratio=16)
- up7 = BatchNormalization(axis=3)(up7)
- up7 = Activation('relu')(up7)
-
- x1 = Reshape(target_shape=(1, np.int32(N/2), np.int32(N/2), 128))(conv2)
- x2 = Reshape(target_shape=(1, np.int32(N/2), np.int32(N/2), 128))(up7)
- merge7 = concatenate([x1,x2], axis = 1)
- merge7 = ConvLSTM2D(filters = 128, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge7)
-
- conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
- conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
- conv7 = SqueezeExcite(conv7, ratio=16)
-
- up8 = Conv2DTranspose(64, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(conv7)
- up8 = SqueezeExcite(up8, ratio=16)
- up8 = BatchNormalization(axis=3)(up8)
- up8 = Activation('relu')(up8)
-
- x1 = Reshape(target_shape=(1, N, N, 64))(conv1)
- x2 = Reshape(target_shape=(1, N, N, 64))(up8)
- merge8 = concatenate([x1,x2], axis = 1)
- merge8 = ConvLSTM2D(filters = 64, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge8)
-
- conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
- conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
- conv8 = SqueezeExcite(conv8, ratio=16)
- conv8 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
- conv9 = Conv2D(1, 1, activation = 'sigmoid')(conv8)
-
- # Load weights.
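- # Note: the pretrained block1_conv1 kernel expects a 3-channel input, so
- # by_name loading will fail with the default (256, 256, 1) input_size; pass a
- # 3-channel input (or adapt/skip that layer) when using these weights.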
- weights_path = keras.utils.get_file(
- 'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
- WEIGHTS_PATH_NO_TOP,
- cache_subdir='models',
- file_hash='6d6bbae143d832006294945121d1f1fc')
- model = Model(inputs = img_input, outputs = conv9)
- model.load_weights(weights_path, by_name=True)
-
- model.compile(optimizer = Adam(lr = learning_rate), loss = 'binary_crossentropy', metrics = ['accuracy'])
- return model
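-
- # Minimal usage sketch (not from the original file; train_images/train_masks are
- # placeholder arrays of paired images and binary masks scaled to [0, 1]):
- #   model = BCDU_net_D3(input_size=(256, 256, 1))
- #   model.summary()
- #   model.fit(train_images, train_masks, batch_size=8, epochs=50, validation_split=0.2)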