Data Preparation

In [1]:
import numpy as np
import time
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import plotly.express as px
from skimage import color
import mahotas as mt
from skimage.color import rgb2gray
import warnings
In [2]:
import os
for dirname, _, filenames in os.walk('./input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
./input\X_test_sat6.csv
./input\X_train_sat6.csv
./input\y_test_sat6.csv
./input\y_train_sat6.csv
In [3]:
train_data_path="./input/X_train_sat6.csv"
train_label_path="./input/y_train_sat6.csv"
test_data_path="./input/X_test_sat6.csv"
test_label_path="./input/y_test_sat6.csv"
In [4]:
def data_read(data_path, nrows):
    data=pd.read_csv(data_path, header=None, nrows=nrows)
    data=data.values ## convert the DataFrame to a NumPy array
    return data
In [28]:
##Read training data
train_data=data_read(train_data_path, nrows=500)
print("Train data shape:" + str(train_data.shape))

##Read training data labels
train_data_label=data_read(train_label_path,nrows=500)
print("Train data label shape:" + str(train_data_label.shape))
print()

##Read test data
test_data=data_read(test_data_path, nrows=100)
print("Test data shape:" + str(test_data.shape))


##Read test data labels
test_data_label=data_read(test_label_path,nrows=100)
print("Test data label shape:" + str(test_data_label.shape))
Train data shape:(500, 3136)
Train data label shape:(500, 6)

Test data shape:(100, 3136)
Test data label shape:(100, 6)
In [29]:
#label converter
# [1,0,0,0,0,0]=building
# [0,1,0,0,0,0]=barren_land
# [0,0,1,0,0,0]=trees
# [0,0,0,1,0,0]=grassland
# [0,0,0,0,1,0]=road
# [0,0,0,0,0,1]=water


def label_conv(label_arr):
    labels=[]
    for i in range(len(label_arr)):
        
        if (label_arr[i]==[1,0,0,0,0,0]).all():
            labels.append("Building")  
            
        elif (label_arr[i]==[0,1,0,0,0,0]).all():  
            labels.append("Barren_land")  
            
        elif (label_arr[i]==[0,0,1,0,0,0]).all():
            labels.append("Tree") 
            
        elif (label_arr[i]==[0,0,0,1,0,0]).all():
            labels.append("Grassland")
            
        elif (label_arr[i]==[0,0,0,0,1,0]).all():
            labels.append("Road") 
            
        else:
            labels.append("Water")
    return labels
train_label_convert=label_conv(train_data_label) ## convert training labels to class names
test_label_convert=label_conv(test_data_label)   ## convert test labels to class names


def data_visualization(data, label, n):
    ##data: training or test data
    ##label: training or test labels
    ##n: number of rows in data available to sample from
    fig = plt.figure(figsize=(14, 14))
    ax = []  # keep a handle to each subplot
    rows, columns=4,4
    for i in range(columns*rows):
        index=np.random.randint(0, n) ##pick a random row
        img= data[index].reshape([28,28,4])[:,:,:3] ##reshape input data to rgb image
        ax.append( fig.add_subplot(rows, columns, i+1) ) # create subplot and append to ax
        ax[-1].set_title("Class:"+str(label[index]))  # set class
        plt.axis("off")
        plt.imshow(img)

    plt.subplots_adjust(wspace=0.1,hspace=0.5)
    plt.show()  # finally, render the plot
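Because every label row is one-hot, the elif chain above can also be collapsed into a single np.argmax lookup. A minimal sketch, assuming the same class order as the comments above:

class_names = np.array(["Building", "Barren_land", "Tree", "Grassland", "Road", "Water"])
# argmax finds the position of the 1 in each one-hot row; indexing maps it to a name
train_label_fast = class_names[np.argmax(train_data_label, axis=1)]
print(list(train_label_fast) == train_label_convert)  # should agree with label_conv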

Training data visualization

In [29]:
data_visualization(train_data,train_label_convert, n=500)

Test data visualization

In [30]:
data_visualization(test_data,test_label_convert, n=100)
In [31]:
def feature_extractor(input_image_file):
    
        tex_feature=[]
        hsv_feature=[]
        ndvi_feature=[]
        arvi_feature=[]

        for df_chunk in pd.read_csv(input_image_file ,header=None,chunksize = 5000):

            df_chunk=df_chunk.astype("int32")
            data=df_chunk.values


            ################data for HSV and Texture feature##############
            img=data.reshape(-1,28,28,4)[:,:,:,:3]
            #############################################################

            ######################Data for NDVI and ARVI#################

            NIR=data.reshape(-1,28,28,4)[:,:,:,3]
            Red=data.reshape(-1,28,28,4)[:,:,:,2]
            Blue=data.reshape(-1,28,28,4)[:,:,:,0]
            #############################################################

            for i in range(len(data)):

                #######Texture_feature####################################
                textures = mt.features.haralick(img[i])
                ht_mean= textures.mean(axis=0)
                tex_feature.append(ht_mean)
                ##########################################################

                #######hsv_feature#########################################
                img_hsv = color.rgb2hsv(img[i]) # Image into HSV colorspace
                h = img_hsv[:,:,0] # Hue
                s = img_hsv[:,:,1] # Saturation
                v = img_hsv[:,:,2] # Value aka Lightness
                hsv_feature.append((h.mean(),s.mean(),v.mean()))
                ###########################################################

                ##########Calculation of NDVI Feature######################
                NDVI=(NIR[i]-Red[i])/(NIR[i]+Red[i])
                ndvi_feature.append(NDVI.mean())
                ############################################################

                ###################Calculation of ARVI#####################
                a_1=NIR[i] -(2*Red[i]-Blue[i])
                a_2=NIR[i] +(2*Red[i]+Blue[i])
                arvi=a_1/a_2
                arvi_feature.append(arvi.mean())
                #######################################################

        features=[]
        for i in range(len(tex_feature)):
            h_stack=np.hstack((tex_feature[i], hsv_feature[i], ndvi_feature[i], arvi_feature[i]))
            features.append(h_stack)
            
        return features
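
Per image the extractor stacks 13 Haralick texture means, 3 HSV channel means, a mean NDVI and a mean ARVI into an 18-dimensional feature vector. A minimal sketch on a single image, assuming train_data from the earlier cell and the same channel indexing as in feature_extractor above:

img4 = train_data[0].reshape(28, 28, 4).astype("int32")
rgb, nir = img4[:, :, :3], img4[:, :, 3]
red, blue = img4[:, :, 2], img4[:, :, 0]                    # channel indices as used above
haralick_means = mt.features.haralick(rgb).mean(axis=0)     # 13 texture statistics
hsv_means = color.rgb2hsv(rgb).mean(axis=(0, 1))            # mean H, S, V
ndvi_mean = ((nir - red) / (nir + red)).mean()              # NDVI = (NIR - Red) / (NIR + Red)
arvi_mean = ((nir - (2 * red - blue)) / (nir + (2 * red + blue))).mean()
feature_row = np.hstack([haralick_means, hsv_means, ndvi_mean, arvi_mean])
print(feature_row.shape)                                    # (18,)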

Feature extraction from training data

In [32]:
train_data_features=feature_extractor(train_data_path)
# saving train data features
feature=pd.DataFrame(train_data_features, columns=["feature"+ str(i) for i in range(len(train_data_features[0]))])
feature.to_csv("train_feature_deepstat_6.csv")

Feature extraction from test data

In [33]:
#test data features extraction
test_data_features=feature_extractor(test_data_path)
feature_test=pd.DataFrame(test_data_features, columns=["feature"+ str(i) for i in range(len(test_data_features[0]))])
feature_test.to_csv("test_feature_deepsat_6.csv")
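Note that to_csv also writes the DataFrame index as the first column, which is why the readers in the next section pass index_col=[0]. A quick round-trip check (a sketch, reusing the file just written):

roundtrip = pd.read_csv("test_feature_deepsat_6.csv", index_col=[0])
print(roundtrip.shape)   # (number of test images, 18)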

Model Building

In [5]:
from sklearn.preprocessing import StandardScaler 
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization, Activation
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from time import time
from mlxtend.evaluate import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
Using TensorFlow backend.
In [6]:
def data_read_(data_path):
    df=pd.read_csv(data_path, index_col=[0])
    return df

def label_read(data_path):
    df=pd.read_csv(data_path, header=None)
    return df

Reading the training data features and labels

In [7]:
train_feature_deepstat_6=data_read_(data_path="./train_feature_deepstat_6.csv")
train_label=label_read(data_path=train_label_path)
print("Training data shape: ",train_feature_deepstat_6.shape)
print("Training label shape: ",train_label.shape)
Training data shape:  (324000, 18)
Training label shape:  (324000, 6)
In [8]:
train_feature_deepstat_6.head()
Out[8]:
feature0 feature1 feature2 feature3 feature4 feature5 feature6 feature7 feature8 feature9 feature10 feature11 feature12 feature13 feature14 feature15 feature16 feature17
0 0.000510 617.467448 0.744604 1208.225664 0.075734 165.287322 4215.435207 7.637573 11.158415 0.000144 5.546250 -0.356272 0.995773 0.365449 0.207631 4.211562e-08 0.130287 0.082918
1 0.001207 417.651225 0.397474 373.466474 0.082166 321.815240 1076.214670 6.722432 10.047084 0.000350 4.048463 -0.345718 0.991588 0.090416 0.258090 8.581116e-08 0.136626 0.142940
2 0.000398 1943.661871 0.403912 1619.892120 0.041302 156.599515 4535.906611 7.854340 11.419267 0.000051 6.343184 -0.394795 0.998089 0.599307 0.510874 5.071253e-08 -0.762366 -0.488413
3 0.000410 1016.512108 0.571321 1181.209671 0.048341 160.536238 3708.326575 7.776289 11.377842 0.000085 5.898673 -0.375375 0.997312 0.579702 0.435979 4.884989e-08 -0.779538 -0.482682
4 0.000647 487.816884 0.296176 345.964345 0.067422 288.690819 896.040495 6.823493 10.881089 0.000118 5.422547 -0.235495 0.972060 0.516917 0.193726 7.365289e-08 -0.659481 -0.320696

Reading the test data features and labels

In [9]:
test_feature_deepsat_6=data_read_(data_path="./test_feature_deepsat_6.csv")
test_label=label_read(data_path=test_label_path)
print("Training data shape: ",test_feature_deepsat_6.shape)
print("Training label shape: ",test_label.shape)
Training data shape:  (81000, 18)
Training label shape:  (81000, 6)
In [10]:
test_feature_deepsat_6.head()
Out[10]:
feature0 feature1 feature2 feature3 feature4 feature5 feature6 feature7 feature8 feature9 feature10 feature11 feature12 feature13 feature14 feature15 feature16 feature17
0 0.000472 825.146019 0.483915 799.436334 0.054771 163.380355 2372.599315 7.433824 11.232332 0.000107 5.772372 -0.321464 0.992508 0.342550 0.203484 4.158225e-08 0.300045 0.188149
1 0.000747 450.118996 0.520765 468.906282 0.082015 203.764809 1425.506132 7.055624 10.791574 0.000153 5.301941 -0.278724 0.983755 0.203896 0.131008 5.035616e-08 0.235854 0.156365
2 0.000375 1244.129580 0.599170 1543.860577 0.041937 224.183576 4931.312727 7.980644 11.476607 0.000063 6.082126 -0.409750 0.998548 0.565407 0.333206 6.285180e-08 -0.672770 -0.387071
3 0.001382 388.768318 0.387355 330.283150 0.064144 272.052422 932.364281 6.665846 9.931245 0.000168 4.945995 -0.221024 0.951162 0.068662 0.239222 7.308982e-08 0.243074 0.193786
4 0.000713 435.000939 0.467699 418.204251 0.071743 111.584020 1237.816065 6.963757 10.728918 0.000235 5.109433 -0.268148 0.980983 0.599072 0.514390 3.666192e-08 -0.841739 -0.561151
In [11]:
test_label.head()
Out[11]:
0 1 2 3 4 5
0 0 0 1 0 0 0
1 0 0 1 0 0 0
2 0 0 0 0 0 1
3 0 1 0 0 0 0
4 0 0 0 0 0 1

Data standardization

In [12]:
sc=StandardScaler()

# fit the scaler on the training features only
sc.fit(train_feature_deepstat_6)

## transform the train and test data with the same statistics
train_data_stn=sc.transform(train_feature_deepstat_6)
test_data_stn=sc.transform(test_feature_deepsat_6)
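StandardScaler learns each column's mean and standard deviation on the training features only and applies the same z-score transform to both splits. A quick sanity check on the first column (a sketch):

col = train_feature_deepstat_6.columns[0]
manual = (train_feature_deepstat_6[col] - train_feature_deepstat_6[col].mean()) / train_feature_deepstat_6[col].std(ddof=0)
print(np.allclose(manual.values, train_data_stn[:, 0]))   # True: same z-scores as the scaler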
In [13]:
model=Sequential()

#layer1
model.add(Dense(units=50,input_shape=(train_data_stn.shape[1],),use_bias=True))
#model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.2))

#layer2
model.add(Dense(units=50, use_bias=True))
#model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.2))

#layer3
model.add(Dense(units=6, activation="softmax"))


##ADD early stopping
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
# mc = ModelCheckpoint('best_model.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
tensorboard=TensorBoard(log_dir='logs/{}'.format(time()))

#compile the model
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["accuracy"])
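
The classifier is a small fully connected network, 18 → 50 → 50 → 6, i.e. 18·50+50 = 950, 50·50+50 = 2,550 and 50·6+6 = 306 weights, about 3.8k trainable parameters in total, which model.summary() can confirm:

model.summary()   # expect Total params: 3,806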

Model Training

In [14]:
# model.fit(train_data_stn, train_label.values, validation_split=0.15, batch_size=512, epochs=150,callbacks=[es, mc,tensorboard]) 
model.fit(train_data_stn, train_label.values, validation_split=0.15, batch_size=150, epochs=500,callbacks=[es]) 
Train on 275400 samples, validate on 48600 samples
Epoch 1/500
275400/275400 [==============================] - 3s 9us/step - loss: 0.1524 - accuracy: 0.9503 - val_loss: 0.0663 - val_accuracy: 0.9758
Epoch 2/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0715 - accuracy: 0.9760 - val_loss: 0.0530 - val_accuracy: 0.9814
Epoch 3/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0607 - accuracy: 0.9796 - val_loss: 0.0473 - val_accuracy: 0.9834
Epoch 4/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0542 - accuracy: 0.9818 - val_loss: 0.0411 - val_accuracy: 0.9863
Epoch 5/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0500 - accuracy: 0.9834 - val_loss: 0.0387 - val_accuracy: 0.9870
Epoch 6/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0468 - accuracy: 0.9845 - val_loss: 0.0372 - val_accuracy: 0.9879
Epoch 7/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0448 - accuracy: 0.9853 - val_loss: 0.0351 - val_accuracy: 0.9882
Epoch 8/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0426 - accuracy: 0.9860 - val_loss: 0.0338 - val_accuracy: 0.9886
Epoch 9/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0417 - accuracy: 0.9860 - val_loss: 0.0327 - val_accuracy: 0.9888
Epoch 10/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0397 - accuracy: 0.9869 - val_loss: 0.0316 - val_accuracy: 0.9894
Epoch 11/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0389 - accuracy: 0.9873 - val_loss: 0.0314 - val_accuracy: 0.9893
Epoch 12/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0377 - accuracy: 0.9875 - val_loss: 0.0299 - val_accuracy: 0.9898
Epoch 13/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0369 - accuracy: 0.9876 - val_loss: 0.0288 - val_accuracy: 0.9900
Epoch 14/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0367 - accuracy: 0.9878 - val_loss: 0.0275 - val_accuracy: 0.9904
Epoch 15/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0357 - accuracy: 0.9882 - val_loss: 0.0276 - val_accuracy: 0.9903
Epoch 16/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0350 - accuracy: 0.9882 - val_loss: 0.0266 - val_accuracy: 0.9904
Epoch 17/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0347 - accuracy: 0.9883 - val_loss: 0.0284 - val_accuracy: 0.9902
Epoch 18/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0340 - accuracy: 0.9887 - val_loss: 0.0250 - val_accuracy: 0.9914
Epoch 19/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0336 - accuracy: 0.9889 - val_loss: 0.0257 - val_accuracy: 0.9915
Epoch 20/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0333 - accuracy: 0.9891 - val_loss: 0.0256 - val_accuracy: 0.9912
Epoch 21/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0332 - accuracy: 0.9889 - val_loss: 0.0261 - val_accuracy: 0.9916
Epoch 22/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0329 - accuracy: 0.9891 - val_loss: 0.0241 - val_accuracy: 0.9921
Epoch 23/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0324 - accuracy: 0.9892 - val_loss: 0.0239 - val_accuracy: 0.9924
Epoch 24/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0322 - accuracy: 0.9895 - val_loss: 0.0237 - val_accuracy: 0.9923
Epoch 25/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0311 - accuracy: 0.9897 - val_loss: 0.0254 - val_accuracy: 0.9916
Epoch 26/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0312 - accuracy: 0.9897 - val_loss: 0.0248 - val_accuracy: 0.9921
Epoch 27/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0311 - accuracy: 0.9899 - val_loss: 0.0229 - val_accuracy: 0.9923
Epoch 28/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0304 - accuracy: 0.9900 - val_loss: 0.0232 - val_accuracy: 0.9922
Epoch 29/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0303 - accuracy: 0.9900 - val_loss: 0.0221 - val_accuracy: 0.9929
Epoch 30/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0297 - accuracy: 0.9902 - val_loss: 0.0234 - val_accuracy: 0.9922
Epoch 31/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0302 - accuracy: 0.9900 - val_loss: 0.0227 - val_accuracy: 0.9926
Epoch 32/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0305 - accuracy: 0.9898 - val_loss: 0.0222 - val_accuracy: 0.9926
Epoch 33/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0298 - accuracy: 0.9901 - val_loss: 0.0217 - val_accuracy: 0.9932
Epoch 34/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0291 - accuracy: 0.9904 - val_loss: 0.0218 - val_accuracy: 0.9930
Epoch 35/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0294 - accuracy: 0.9905 - val_loss: 0.0249 - val_accuracy: 0.9916
Epoch 36/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0294 - accuracy: 0.9902 - val_loss: 0.0211 - val_accuracy: 0.9929
Epoch 37/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0288 - accuracy: 0.9905 - val_loss: 0.0214 - val_accuracy: 0.9928
Epoch 38/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0293 - accuracy: 0.9903 - val_loss: 0.0211 - val_accuracy: 0.9932
Epoch 39/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0284 - accuracy: 0.9909 - val_loss: 0.0208 - val_accuracy: 0.9933
Epoch 40/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0285 - accuracy: 0.9907 - val_loss: 0.0210 - val_accuracy: 0.9933
Epoch 41/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0283 - accuracy: 0.9906 - val_loss: 0.0205 - val_accuracy: 0.9933
Epoch 42/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0285 - accuracy: 0.9906 - val_loss: 0.0208 - val_accuracy: 0.9934
Epoch 43/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0281 - accuracy: 0.9907 - val_loss: 0.0213 - val_accuracy: 0.9930
Epoch 44/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0279 - accuracy: 0.9910 - val_loss: 0.0211 - val_accuracy: 0.9931
Epoch 45/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0281 - accuracy: 0.9910 - val_loss: 0.0214 - val_accuracy: 0.9930
Epoch 46/500
275400/275400 [==============================] - 2s 7us/step - loss: 0.0276 - accuracy: 0.9908 - val_loss: 0.0218 - val_accuracy: 0.9926
Epoch 00046: early stopping
Out[14]:
<keras.callbacks.callbacks.History at 0x20f2c0c9e88>
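
Note that with this setup EarlyStopping keeps the weights from the final (46th) epoch rather than from the best validation epoch. To keep the best epoch instead, the commented-out ModelCheckpoint can be re-enabled, or restore_best_weights used; a sketch:

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5, restore_best_weights=True)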

Save Model

In [15]:
model.save('LandcoverClassifier.h5')
In [18]:
print(model)
<keras.engine.sequential.Sequential object at 0x0000020F2AAB7188>

Model Prediction

In [24]:
test_feature_deepsat_6.head(100)
Out[24]:
feature0 feature1 feature2 feature3 feature4 feature5 feature6 feature7 feature8 feature9 feature10 feature11 feature12 feature13 feature14 feature15 feature16 feature17
0 0.000472 825.146019 0.483915 799.436334 0.054771 163.380355 2372.599315 7.433824 11.232332 0.000107 5.772372 -0.321464 0.992508 0.342550 0.203484 4.158225e-08 0.300045 0.188149
1 0.000747 450.118996 0.520765 468.906282 0.082015 203.764809 1425.506132 7.055624 10.791574 0.000153 5.301941 -0.278724 0.983755 0.203896 0.131008 5.035616e-08 0.235854 0.156365
2 0.000375 1244.129580 0.599170 1543.860577 0.041937 224.183576 4931.312727 7.980644 11.476607 0.000063 6.082126 -0.409750 0.998548 0.565407 0.333206 6.285180e-08 -0.672770 -0.387071
3 0.001382 388.768318 0.387355 330.283150 0.064144 272.052422 932.364281 6.665846 9.931245 0.000168 4.945995 -0.221024 0.951162 0.068662 0.239222 7.308982e-08 0.243074 0.193786
4 0.000713 435.000939 0.467699 418.204251 0.071743 111.584020 1237.816065 6.963757 10.728918 0.000235 5.109433 -0.268148 0.980983 0.599072 0.514390 3.666192e-08 -0.841739 -0.561151
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
95 0.000507 769.789432 0.520759 802.854610 0.057145 201.380149 2441.629007 7.406199 11.167590 0.000109 5.728691 -0.315376 0.991787 0.240206 0.182629 5.038883e-08 0.297621 0.201184
96 0.000979 276.128892 0.296880 203.126393 0.073791 319.503270 536.376680 6.406319 10.356170 0.000167 4.960361 -0.195891 0.942992 0.054210 0.157207 8.245650e-08 0.088285 0.087091
97 0.000959 231.974801 0.435562 208.607496 0.086666 305.278351 602.455181 6.509965 10.368447 0.000197 4.848627 -0.200121 0.947317 0.072103 0.130930 7.731165e-08 0.066433 0.068680
98 0.001872 88.469686 0.578439 104.894500 0.173200 288.793180 331.108316 6.070286 9.571058 0.000360 4.156188 -0.187323 0.922301 0.266215 0.057398 6.905092e-08 -0.192395 -0.076444
99 0.002173 112.919459 0.327707 87.722054 0.126860 248.576409 237.968758 5.857020 9.385043 0.000320 4.330549 -0.155786 0.876229 0.124116 0.139669 6.152372e-08 0.219406 0.153756

100 rows × 18 columns

In [32]:
predicted = model.predict_classes(test_feature_deepsat_6.head(100))
predicted
Out[32]:
array([1, 1, 1, 4, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1, 4, 1, 4,
       4, 1, 4, 0, 1, 4, 1, 4, 1, 1, 1, 1, 0, 4, 1, 1, 4, 1, 1, 1, 1, 4,
       1, 1, 4, 1, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 1, 4, 1, 4, 1, 4, 1,
       4, 4, 1, 1, 1, 4, 1, 1, 4, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 4, 1, 1,
       4, 1, 1, 1, 4, 1, 1, 1, 4, 4, 4, 4], dtype=int64)
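
predict_classes returns integer class indices (0-5). Since the network was trained on standardized features, predictions should be run on the scaled test set; a sketch mapping the indices back to class names:

class_names = np.array(["Building", "Barren_land", "Tree", "Grassland", "Road", "Water"])
pred = model.predict_classes(test_data_stn[:100])   # standardized features, as during training
print(class_names[pred][:10])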

Model Evaluation

In [36]:
Accuracy_on_test_data=model.evaluate(test_data_stn, test_label.values)[1]
print("Accuracy on test data: ",Accuracy_on_test_data)
81000/81000 [==============================] - 1s 7us/step
Accuracy on test data:  0.9933703541755676

Confusion Matrix

In [37]:
#label converter
# [1,0,0,0,0,0]=building
# [0,1,0,0,0,0]=barren_land
# [0,0,1,0,0,0]=tree
# [0,0,0,1,0,0]=grassland
# [0,0,0,0,1,0]=road
# [0,0,0,0,0,1]=water


##Building confusion matrix

y_pred=model.predict_classes(test_data_stn)
y_true=np.argmax(test_label.values, axis=1)
cm=confusion_matrix(y_target=y_true, y_predicted=y_pred)

plot_confusion_matrix(cm,class_names=["Building","Barren_land","Tree","Grassland","Road","Water"],figsize=(12,12))
plt.show()
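
Per-class precision and recall can be read off alongside the confusion matrix with scikit-learn's classification_report (a sketch, reusing y_true and y_pred from the cell above):

from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred,
      target_names=["Building", "Barren_land", "Tree", "Grassland", "Road", "Water"]))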

Model Loading

In [1]:
from keras.models import load_model
model = load_model('LandcoverClassifier.h5')
Using TensorFlow backend.
In [2]:
model_json = model.to_json()
model_json
Out[2]:
'{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 18], "dtype": "float32", "units": 50, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Activation", "config": {"name": "activation_1", "trainable": true, "dtype": "float32", "activation": "relu"}}, {"class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.2, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 50, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Activation", "config": {"name": "activation_2", "trainable": true, "dtype": "float32", "activation": "relu"}}, {"class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.2, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_3", "trainable": true, "dtype": "float32", "units": 6, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.3.1", "backend": "tensorflow"}'
In [7]:
with open('model.json', 'w') as f:
    f.write(model_json)   # model_json is already a JSON string, so write it out as-is
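
The JSON file stores only the architecture; the weights can be exported and re-attached separately with model_from_json (a sketch, using a hypothetical weights file name):

from keras.models import model_from_json

model.save_weights('LandcoverClassifier_weights.h5')   # hypothetical weights-only file
with open('model.json') as f:
    restored = model_from_json(f.read())
restored.load_weights('LandcoverClassifier_weights.h5')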