import tensorflow as tf
from keras import backend as K
from keras.models import *
from keras.layers import *
from keras.optimizers import *

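# Semantic segmentation model: a ResNet-101 encoder with a U-Net-style
# upsampling decoder (the skip connections are currently commented out) and an
# auxiliary edge-supervision head built from fixed Sobel filters.
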
def conv3x3(x, out_filters, strides=(1, 1)):
    # 3x3 convolution without bias (BatchNormalization follows).
    x = Conv2D(out_filters, 3, padding='same', strides=strides, use_bias=False,
               kernel_initializer='he_normal')(x)
    return x


def Conv2d_BN(x, nb_filter, kernel_size, strides=(1, 1), padding='same', use_activation=True):
    # Convolution + BatchNormalization, with an optional ReLU.
    x = Conv2D(nb_filter, kernel_size, padding=padding, strides=strides,
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)
    if use_activation:
        x = Activation('relu')(x)
    return x


def basic_Block(input, out_filters, strides=(1, 1), with_conv_shortcut=False):
    # Standard two-convolution residual block (ResNet "basic" block).
    x = conv3x3(input, out_filters, strides)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = conv3x3(x, out_filters)
    x = BatchNormalization(axis=3)(x)

    if with_conv_shortcut:
        # Projection shortcut: match the channel count and spatial size of the main path.
        residual = Conv2D(out_filters, 1, strides=strides, use_bias=False,
                          kernel_initializer='he_normal')(input)
        residual = BatchNormalization(axis=3)(residual)
        x = add([x, residual])
    else:
        x = add([x, input])

    x = Activation('relu')(x)
    return x


def bottleneck_Block(input, out_filters, strides=(1, 1), with_conv_shortcut=False):
    # ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, with an identity
    # or projection shortcut. `out_filters` is the expanded (output) width.
    expansion = 4
    de_filters = int(out_filters / expansion)

    x = Conv2D(de_filters, 1, use_bias=False, kernel_initializer='he_normal')(input)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = Conv2D(de_filters, 3, strides=strides, padding='same', use_bias=False,
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = Conv2D(out_filters, 1, use_bias=False, kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)

    if with_conv_shortcut:
        residual = Conv2D(out_filters, 1, strides=strides, use_bias=False,
                          kernel_initializer='he_normal')(input)
        residual = BatchNormalization(axis=3)(residual)
        x = add([x, residual])
    else:
        x = add([x, input])

    x = Activation('relu')(x)
    return x
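

# canny() below is not a full Canny detector: it applies fixed horizontal and
# vertical Sobel kernels to a single-channel map and returns both gradient
# responses, which the auxiliary 'out_sea' head then turns into an edge mask.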
def canny(x):
    # Horizontal-gradient Sobel kernel; the vertical kernel is its transpose.
    sobel_x = tf.constant([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], tf.float32)
    sobel_x_filter = tf.reshape(sobel_x, [3, 3, 1, 1])
    sobel_y_filter = tf.transpose(sobel_x_filter, [1, 0, 2, 3])

    filtered_x = tf.nn.conv2d(x, sobel_x_filter,
                              strides=[1, 1, 1, 1], padding='SAME')
    filtered_y = tf.nn.conv2d(x, sobel_y_filter,
                              strides=[1, 1, 1, 1], padding='SAME')

    output = concatenate([filtered_y, filtered_x], axis=3)
    return output


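# Binary focal loss (Lin et al., 2017): FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t),
# where p_t is the predicted probability of the true class and alpha_t balances
# positives against negatives. gamma > 0 down-weights easy, well-classified examples.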
def focal_loss(alpha, gamma):
    def binary_focal_loss_fixed(y_true, y_pred):
        """
        y_true must have shape (None, 1); y_pred must already have been passed
        through a sigmoid.
        """
        # y_true only takes the values 0 or 1 (binary classification), so it can
        # be treated as a mask:
        #   y_true * alpha             sets the slots where y_true == 1 to alpha
        #   (1 - y_true) * (1 - alpha) sets the slots where y_true == 0 to 1 - alpha
        y_true = tf.cast(y_true, tf.float32)
        alpha_t = y_true * alpha + (K.ones_like(y_true) - y_true) * (1 - alpha)
        # Same masking idea for p_t:
        #   y_true * y_pred              keeps y_pred where y_true == 1
        #   (1 - y_true) * (1 - y_pred)  keeps 1 - y_pred where y_true == 0
        #   K.epsilon()                  guards against log(0)
        p_t = y_true * y_pred + (K.ones_like(y_true) - y_true) * (K.ones_like(y_true) - y_pred) + K.epsilon()
        focal_loss = -alpha_t * K.pow((K.ones_like(y_true) - p_t), gamma) * K.log(p_t)
        return K.mean(focal_loss)
    return binary_focal_loss_fixed


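# ResNet-101 encoder (3/4/23/3 bottleneck blocks) followed by five 2x upsampling
# stages back to the input resolution. Output 'out' is the per-pixel softmax over
# num_class classes; 'out_sea' supervises the edges of class channel 6 via the
# Sobel operator above.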
def unet_resnet_101(input_size, num_class, pretrained_weights=None, flag_summary=True):
    input = Input(input_size)

    # Stem: 7x7/2 convolution + 3x3/2 max-pooling -> 1/4 of the input resolution.
    conv1_1 = Conv2D(64, 7, strides=(2, 2), padding='same', use_bias=False,
                     kernel_initializer='he_normal')(input)
    conv1_1 = BatchNormalization(axis=3)(conv1_1)
    conv1_1 = Activation('relu')(conv1_1)
    conv1_2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(conv1_1)

    # conv2_x 1/4
    conv2_1 = bottleneck_Block(conv1_2, 256, strides=(1, 1), with_conv_shortcut=True)
    conv2_2 = bottleneck_Block(conv2_1, 256)
    conv2_3 = bottleneck_Block(conv2_2, 256)

    # conv3_x 1/8
    conv3_1 = bottleneck_Block(conv2_3, 512, strides=(2, 2), with_conv_shortcut=True)
    conv3_2 = bottleneck_Block(conv3_1, 512)
    conv3_3 = bottleneck_Block(conv3_2, 512)
    conv3_4 = bottleneck_Block(conv3_3, 512)

    # conv4_x 1/16
    conv4_1 = bottleneck_Block(conv3_4, 1024, strides=(2, 2), with_conv_shortcut=True)
    conv4_2 = bottleneck_Block(conv4_1, 1024)
    conv4_3 = bottleneck_Block(conv4_2, 1024)
    conv4_4 = bottleneck_Block(conv4_3, 1024)
    conv4_5 = bottleneck_Block(conv4_4, 1024)
    conv4_6 = bottleneck_Block(conv4_5, 1024)
    conv4_7 = bottleneck_Block(conv4_6, 1024)
    conv4_8 = bottleneck_Block(conv4_7, 1024)
    conv4_9 = bottleneck_Block(conv4_8, 1024)
    conv4_10 = bottleneck_Block(conv4_9, 1024)
    conv4_11 = bottleneck_Block(conv4_10, 1024)
    conv4_12 = bottleneck_Block(conv4_11, 1024)
    conv4_13 = bottleneck_Block(conv4_12, 1024)
    conv4_14 = bottleneck_Block(conv4_13, 1024)
    conv4_15 = bottleneck_Block(conv4_14, 1024)
    conv4_16 = bottleneck_Block(conv4_15, 1024)
    conv4_17 = bottleneck_Block(conv4_16, 1024)
    conv4_18 = bottleneck_Block(conv4_17, 1024)
    conv4_19 = bottleneck_Block(conv4_18, 1024)
    conv4_20 = bottleneck_Block(conv4_19, 1024)
    conv4_21 = bottleneck_Block(conv4_20, 1024)
    conv4_22 = bottleneck_Block(conv4_21, 1024)
    conv4_23 = bottleneck_Block(conv4_22, 1024)

    # conv5_x 1/32
    conv5_1 = bottleneck_Block(conv4_23, 2048, strides=(2, 2), with_conv_shortcut=True)
    conv5_2 = bottleneck_Block(conv5_1, 2048)
    conv5_3 = bottleneck_Block(conv5_2, 2048)

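    # Decoder: five successive 2x upsampling stages (1/32 -> 1/16 -> 1/8 -> 1/4
    # -> 1/2 -> 1/1). The concatenations with the encoder features are left
    # commented out, so as written this is a plain encoder-decoder rather than
    # a full U-Net with skip connections.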
    # # conv4_x 1/16
    up6 = Conv2d_BN(UpSampling2D(size=(2, 2))(conv5_3), 1024, 2)
    # merge6 = concatenate([conv4_23, up6], axis=3)
    # conv6 = Conv2d_BN(merge6, 1024, 3)
    # conv6 = Conv2d_BN(conv6, 1024, 3)

    # # conv3_x 1/8
    up7 = Conv2d_BN(UpSampling2D(size=(2, 2))(up6), 512, 2)
    # merge7 = concatenate([conv3_4, up7], axis=3)
    # conv7 = Conv2d_BN(merge7, 512, 3)
    # conv7 = Conv2d_BN(conv7, 512, 3)

    # # conv2_x 1/4
    up8 = Conv2d_BN(UpSampling2D(size=(2, 2))(up7), 256, 2)
    # merge8 = concatenate([conv2_3, up8], axis=3)
    # conv8 = Conv2d_BN(merge8, 256, 3)
    # conv8 = Conv2d_BN(conv8, 256, 3)

    # # conv1_x 1/2
    up9 = Conv2d_BN(UpSampling2D(size=(2, 2))(up8), 64, 2)
    # merge9 = concatenate([conv1_1, up9], axis=3)
    # conv9 = Conv2d_BN(merge9, 64, 3)
    # conv9 = Conv2d_BN(conv9, 64, 3)

    up10 = Conv2d_BN(UpSampling2D(size=(2, 2))(up9), 64, 2)
    # conv10 = Conv2d_BN(up10, 64, 3)
    # conv10 = Conv2d_BN(conv10, 64, 3)

    out = Conv2D(num_class, 1, activation='softmax', name='out')(up10)
    # activation = Activation('sigmoid', name='Classification')(conv11)

    # Take channel index 6 of the softmax output (the "sea" class) and restore
    # the channel axis so it can be fed through the Sobel filters.
    sea_super = Lambda(lambda x: x[:, :, :, 6])(out)
    _, row, col = sea_super.get_shape().as_list()
    sea_super = Reshape((row, col, 1))(sea_super)
    ConA = Lambda(canny)
    ConA.trainable = False
    sea_super_out = ConA(sea_super)
    out_sea = Conv2D(1, 1, activation='sigmoid', name='out_sea')(sea_super_out)
    model = Model(inputs=input, outputs=[out, out_sea])

    model.compile(optimizer=Adam(lr=1e-4),
                  loss={'out': 'categorical_crossentropy',
                        'out_sea': focal_loss(0.25, 2.)},
                  metrics={'out': ['accuracy'],
                           'out_sea': ['accuracy']})
    if flag_summary:
        model.summary()
    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model


if __name__ == '__main__':
    model = unet_resnet_101(num_class=7, input_size=(256, 256, 4), flag_summary=True)
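    # Illustrative only (not part of the original script): training would pass
    # one target per named output. With the (256, 256, 4) input and 7 classes
    # above, hypothetical arrays x_train (N, 256, 256, 4), y_seg (N, 256, 256, 7)
    # one-hot masks and y_edge (N, 256, 256, 1) binary edge maps could be fed as:
    # model.fit(x_train, {'out': y_seg, 'out_sea': y_edge}, batch_size=4, epochs=10)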