Page Menu
Home
c4science
Search
Configure Global Search
Log In
Files
F109996419
segnet_main.py
No One
Temporary
Actions
Download File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Thu, Apr 24, 07:10
Size
4 KB
Mime Type
text/x-python
Expires
Sat, Apr 26, 07:10 (2 d)
Engine
blob
Format
Raw Data
Handle
25755608
Attached To
R8797 solarPV
segnet_main.py
View Options
import
os
import
tensorflow
import
numpy
as
np
import
scipy
import
data_loaders
import
datetime
import
sys
# import cv2
from
scipy
import
ndimage
#from matplotlib import pyplot as plt
from
PIL
import
Image
from
keras.models
import
*
from
keras.layers
import
Reshape
from
keras.layers.core
import
*
from
keras.layers
import
*
from
keras.layers.normalization
import
BatchNormalization
from
keras.layers.convolutional
import
*
from
keras.layers.advanced_activations
import
LeakyReLU
from
keras.optimizers
import
Adam
,
SGD
from
keras.utils
import
np_utils
from
keras
import
backend
as
K
def weighted_categorical_crossentropy(weights):
    """Build a class-weighted categorical cross-entropy loss.

    A weighted version of keras.objectives.categorical_crossentropy.

    Args:
        weights: array-like of shape (C,), one weight per class.

    Returns:
        A Keras-compatible loss callable ``loss(y_true, y_pred)``.

    Usage:
        weights = np.array([0.5, 2, 10])  # class 1 at 0.5x, class 2 at 2x, class 3 at 10x
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss, optimizer='adam')
    """
    class_weights = K.variable(weights)

    def loss(y_true, y_pred):
        # Rescale predictions so the class probabilities of each sample sum to 1.
        normalised = y_pred / K.sum(y_pred, axis=-1, keepdims=True)
        # Clip away exact 0/1 so K.log cannot produce NaN or Inf.
        clipped = K.clip(normalised, K.epsilon(), 1 - K.epsilon())
        # Weighted log-likelihood, negated and summed over the class axis.
        weighted_ll = y_true * K.log(clipped) * class_weights
        return -K.sum(weighted_ll, -1)

    return loss
# Default per-class loss weights for segnet / weighted_categorical_crossentropy:
# two classes, equally weighted (presumably background vs. PV — confirm with run()).
default_w = [1, 1]
def segnet(nClasses, optimizer=None, input_height=360, input_width=480,
           weights=None):
    """Build a SegNet-style encoder/decoder segmentation model.

    Args:
        nClasses: number of output classes per pixel.
        optimizer: if not None, the model is compiled with this optimizer and
            a weighted categorical cross-entropy loss; otherwise it is
            returned uncompiled.
        input_height: input image height in pixels.
        input_width: input image width in pixels.
        weights: per-class loss weights (length nClasses). Defaults to equal
            weighting (``default_w``). Kept as None in the signature to avoid
            a shared mutable default argument.

    Returns:
        A keras Sequential model mapping (input_height, input_width, 3)
        images to (input_height * input_width, nClasses) per-pixel softmax
        outputs.
    """
    if weights is None:
        weights = default_w

    kernel = 3  # Size of the (square) convolution kernel.
    first_conv_windows = 16  # Determines complexity of the model.
    # Original is 64 and yields ~29,000,000 parameters; 4 yields 21,785.
    pad = 3  # Border so padded dims are divisible by 2**3 (three poolings).
    pool_size = 2

    model = Sequential()

    def _conv_bn(filters, activation=None):
        # Append Conv2D + BatchNormalization (+ optional activation) stage.
        model.add(Conv2D(filters, kernel_size=kernel, padding='same'))
        model.add(BatchNormalization())
        if activation is not None:
            model.add(Activation(activation))

    # Encoder: three conv/pool stages, then a bottleneck conv stage.
    model.add(ZeroPadding2D(padding=pad,
                            input_shape=(input_height, input_width, 3)))
    _conv_bn(first_conv_windows, 'relu')
    model.add(MaxPooling2D(pool_size=pool_size))
    _conv_bn(first_conv_windows * 2, 'relu')
    model.add(MaxPooling2D(pool_size=pool_size))
    _conv_bn(first_conv_windows * 4, 'relu')
    model.add(MaxPooling2D(pool_size=pool_size))
    _conv_bn(first_conv_windows * 8, 'relu')

    # Decoder: conv/upsample stages mirroring the encoder (no activations,
    # matching the original architecture).
    _conv_bn(first_conv_windows * 8)
    model.add(UpSampling2D(size=(pool_size, pool_size)))
    _conv_bn(first_conv_windows * 4)
    model.add(UpSampling2D(size=(pool_size, pool_size)))
    _conv_bn(first_conv_windows * 2)
    model.add(UpSampling2D(size=(pool_size, pool_size)))
    _conv_bn(first_conv_windows)

    # 1x1 conv to per-pixel class scores, cropped back to the unpadded size.
    model.add(Conv2D(nClasses, kernel_size=1, padding='same'))
    model.add(Cropping2D(cropping=pad))
    # Flatten spatial dims to (H*W, nClasses) and softmax over the classes.
    model.add(Reshape((nClasses, input_height * input_width)))
    model.add(Permute((2, 1)))
    model.add(Activation('softmax'))

    if optimizer is not None:
        model.compile(loss=weighted_categorical_crossentropy(weights),
                      optimizer=optimizer,
                      metrics=['accuracy'])
    return model
def run(rotations, lights, dice, weight1, weight2, epochs):
    """Train a 2-class segnet on the loaded image set and save the model.

    Args:
        rotations: forwarded to data_loaders.load_images (rotation
            augmentation — confirm semantics against data_loaders).
        lights: forwarded to data_loaders.load_images (lighting
            augmentation — confirm semantics against data_loaders).
        dice: only embedded in the saved model's name; does not affect
            training here.
        weight1: loss weight for output channel 0 (pixels whose label is 0).
        weight2: loss weight for output channel 1.
        epochs: number of training epochs.
    """
    original_images, labeled_images = data_loaders.load_images(
        lights=lights, rotations=rotations)
    X_train = np.array(original_images)
    # Flatten each label image into a 1-D vector of per-pixel labels.
    Y_train_flat = np.array(labeled_images).reshape(len(labeled_images), -1)
    # One dimension for each channel (one channel PV, one channel non-PV):
    # channel 0 is 1 wherever the label is 0, channel 1 is the raw label
    # (assumed to be 0/1 — confirm against data_loaders output).
    Y_train_2d = np.stack(
        (np.where(Y_train_flat == 0, 1, 0), Y_train_flat),
        axis=-1).reshape(len(X_train), -1, 2)

    # NOTE(review): the original also computed X_train[0].shape,
    # Y_train_flat[0].shape and Y_train_2d[0].shape into locals that were
    # never used; those dead assignments are removed.
    model_u = segnet(2, optimizer='adam', input_width=250, input_height=250,
                     weights=[weight1, weight2])
    model_u.summary()

    model_name = str(datetime.date.today()) + \
        "segnet_dice_{:}_{:}_epochs:_{:}".format(dice, (weight1, weight2), epochs)
    print(model_name)

    model_u.fit(X_train, Y_train_2d, validation_split=0.1, epochs=epochs)
    # Save the weights (model.save stores architecture + weights in one file).
    model_u.save('/home/mesguerr/models/segnet{:}.h5'.format(model_name))
    # Save the model architecture
if __name__ == "__main__":
    # CLI: segnet_main.py rotations lights dice weight1 weight2 epochs
    argv = sys.argv
    run(
        int(argv[1]),    # rotations
        int(argv[2]),    # lights
        int(argv[3]),    # dice
        float(argv[4]),  # weight1
        float(argv[5]),  # weight2
        int(argv[6]),    # epochs
    )
Event Timeline
Log In to Comment