Add training stuff

This commit is contained in:
TimNiklasWitte
2022-03-30 17:01:33 +02:00
parent a61a62e92d
commit 736ea31530
61 changed files with 721 additions and 12 deletions

BIN  Plots/ColoredImages_1.png (new file, 3.0 MiB; binary file not shown)

BIN  Plots/ColoredImages_2.png (new file, 3.0 MiB; binary file not shown)

Binary file not shown (new file, 4.5 MiB)

Binary file not shown (new file, 4.0 MiB)

Binary file not shown (new file, 19 KiB)

Binary file not shown (new file, 16 KiB)


@@ -0,0 +1,32 @@
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


def main():
    # Load the train and test loss curves exported from TensorBoard
    df_train_loss = pd.read_csv('run-.-tag-Train loss.csv', sep=',')
    train_loss = df_train_loss["Value"]

    df_test_loss = pd.read_csv('run-.-tag-Test loss.csv', sep=',')
    test_loss = df_test_loss["Value"]

    # Plot both curves against the epoch index
    x = np.arange(len(train_loss))
    plt.plot(x, train_loss, label="Train loss", color="r")
    plt.plot(x, test_loss, label="Test loss", color="b")

    plt.legend()
    plt.grid(True)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.savefig("TrainTestLoss.png")
    plt.show()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("KeyboardInterrupt received")


@@ -0,0 +1,182 @@
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import cv2 as cv

sys.path.append("..")
from Autoencoder import Autoencoder
from Training import prepare_data, getRGB

from Colorful_Image_Colorization.model import build_model
from Colorful_Image_Colorization.config import img_rows, img_cols
from Colorful_Image_Colorization.config import nb_neighbors, T, epsilon


def main():
    # Create the ImageNet dataset
    labels_path = tf.keras.utils.get_file('ImageNetLabels.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
    imagenet_labels = np.array(open(labels_path).read().splitlines())

    data_dir = '/home/timwitte/Downloads/'
    write_dir = '../imagenet'

    # Construct a tf.data.Dataset
    download_config = tfds.download.DownloadConfig(
        extract_dir=os.path.join(write_dir, 'extracted'),
        manual_dir=data_dir
    )

    download_and_prepare_kwargs = {
        'download_dir': os.path.join(write_dir, 'downloaded'),
        'download_config': download_config,
    }

    train_dataset, test_dataset = tfds.load('imagenet2012',
                                            data_dir=os.path.join(write_dir, 'data'),
                                            split=['train', 'validation'],
                                            shuffle_files=True,
                                            download=True,
                                            as_supervised=True,
                                            download_and_prepare_kwargs=download_and_prepare_kwargs)

    test_dataset = test_dataset.take(32).apply(prepare_data)

    # Load our model
    model_our = Autoencoder()
    model_our.build((1, 256, 256, 1))  # need a batch size
    model_our.load_weights("../saved_models/trainied_weights_epoch_12")

    # Load the model to compare against
    model_weights_path = '../Colorful_Image_Colorization/model.06-2.5489.hdf5'
    model_toCompare = build_model()
    model_toCompare.load_weights(model_weights_path)

    loss_function = tf.keras.losses.MeanSquaredError()

    for img_L, img_AB_orginal in test_dataset.take(1):

        img_rgb_orginal = getRGB(img_L, img_AB_orginal)

        # Colorize the batch with our model
        img_AB_reconstructed_our = model_our.predict(img_L.numpy())
        img_rgb_reconstructed_our = getRGB(img_L, img_AB_reconstructed_our)

        NUM_IMGS = 5
        fig, axs = plt.subplots(NUM_IMGS, 4)

        axs[0, 0].set_title("Input", fontsize=30)
        axs[0, 1].set_title(r"Richard Zhang $\it{et\ al.}$", fontsize=30)
        axs[0, 2].set_title("Ours", fontsize=30)
        axs[0, 3].set_title("Ground Truth", fontsize=30)

        losses1 = []
        losses2 = []
        for i in range(NUM_IMGS):

            # Colorize the same image with the model of Richard Zhang et al.
            img_AB_reconstructed_toCompare = getABFromModel(model_toCompare, img_L[i].numpy())
            img_rgb_reconstructed_toCompare = getRGB(img_L[i], img_AB_reconstructed_toCompare, batch_mode=False)

            axs[i, 0].imshow(img_L[i], cmap="gray")
            axs[i, 0].set_axis_off()

            axs[i, 1].imshow(img_rgb_reconstructed_toCompare)
            axs[i, 1].set_axis_off()

            axs[i, 2].imshow(img_rgb_reconstructed_our[i])
            axs[i, 2].set_axis_off()

            axs[i, 3].imshow(img_rgb_orginal[i])
            axs[i, 3].set_axis_off()

            # Compare both colorizations against the ground truth
            loss_our = loss_function(img_rgb_orginal[i], img_rgb_reconstructed_our[i])
            loss_toCompare = loss_function(img_rgb_orginal[i], img_rgb_reconstructed_toCompare)

            losses1.append(loss_our)
            losses2.append(loss_toCompare)

        plt.tight_layout()
        fig.set_size_inches(20, 25)
        fig.savefig("ColoredImages_compareModels.png")

        # Reset plot
        plt.clf()
        plt.cla()
        fig = plt.figure()

        # Create bar plot: one pair of bars per image
        x_axis = np.arange(NUM_IMGS)
        width = 0.2
        plt.bar(x_axis - width / 2, losses2, width=width / 2, label=r"Richard Zhang $\it{et\ al.}$")
        plt.bar(x_axis, losses1, width=width / 2, label="Ours")
        plt.xticks(x_axis, [f"No. {i}" for i in range(NUM_IMGS)])

        plt.title("Loss of colorized images")
        plt.xlabel("Image")
        plt.ylabel("Loss")
        plt.legend()
        plt.tight_layout()
        plt.savefig("ColorizedImagesLossPlot_comparedModels.png")


def getABFromModel(model, grey_img):
    # code taken from https://github.com/foamliu/Colorful-Image-Colorization/blob/master/demo.py
    q_ab = np.load("../Colorful_Image_Colorization/pts_in_hull.npy")
    nb_q = q_ab.shape[0]

    # Rescale the grey image from [-1, 1] to [0, 1] and predict the bin probabilities
    grey_img = np.expand_dims(grey_img, axis=0)
    X_colorized = model.predict((grey_img + 1) / 2)

    h, w = img_rows // 4, img_cols // 4
    X_colorized = X_colorized.reshape((h * w, nb_q))

    # Reweight probas (annealed mean over the color bins)
    X_colorized = np.exp(np.log(X_colorized + epsilon) / T)
    X_colorized = X_colorized / np.sum(X_colorized, 1)[:, np.newaxis]

    # Reweighted: expected a and b value per pixel
    q_a = q_ab[:, 0].reshape((1, 313))
    q_b = q_ab[:, 1].reshape((1, 313))
    X_a = np.sum(X_colorized * q_a, 1).reshape((h, w))
    X_b = np.sum(X_colorized * q_b, 1).reshape((h, w))

    # Upsample the predicted AB channels to the input resolution
    X_a = cv.resize(X_a, (img_rows, img_cols), interpolation=cv.INTER_CUBIC)
    X_b = cv.resize(X_b, (img_rows, img_cols), interpolation=cv.INTER_CUBIC)

    # Before: -90 <= a <= 100, -110 <= b <= 110
    # After: 38 <= a <= 228, 18 <= b <= 238
    X_a = X_a + 128
    X_b = X_b + 128

    # Rescale the AB channels back to [-1, 1] as expected by getRGB
    out_lab = np.zeros((256, 256, 2), dtype=np.float32)
    out_lab[:, :, 0] = X_a
    out_lab[:, :, 1] = X_b
    out_lab[:, :, 0] = -1.0 + 2 * (out_lab[:, :, 0] - 38.0) / 190
    out_lab[:, :, 1] = -1.0 + 2 * (out_lab[:, :, 1] - 20.0) / 203

    return out_lab


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("KeyboardInterrupt received")


@@ -0,0 +1,84 @@
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
import numpy as np
import os
import sys

sys.path.append("..")
from Autoencoder import Autoencoder
from Training import prepare_data, getRGB


def main():
    # Create the ImageNet dataset
    labels_path = tf.keras.utils.get_file('ImageNetLabels.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
    imagenet_labels = np.array(open(labels_path).read().splitlines())

    data_dir = '/home/timwitte/Downloads/'
    write_dir = '../imagenet'

    # Construct a tf.data.Dataset
    download_config = tfds.download.DownloadConfig(
        extract_dir=os.path.join(write_dir, 'extracted'),
        manual_dir=data_dir
    )

    download_and_prepare_kwargs = {
        'download_dir': os.path.join(write_dir, 'downloaded'),
        'download_config': download_config,
    }

    train_dataset, test_dataset = tfds.load('imagenet2012',
                                            data_dir=os.path.join(write_dir, 'data'),
                                            split=['train', 'validation'],
                                            shuffle_files=True,
                                            download=True,
                                            as_supervised=True,
                                            download_and_prepare_kwargs=download_and_prepare_kwargs)

    test_dataset = test_dataset.take(32).apply(prepare_data)

    # Load the trained autoencoder
    autoencoder = Autoencoder()
    autoencoder.build((1, 256, 256, 1))  # need a batch size
    autoencoder.load_weights("../saved_models/trainied_weights_epoch_12")

    autoencoder.summary()
    autoencoder.encoder.summary()
    autoencoder.decoder.summary()

    for img_L, img_AB_orginal in test_dataset.take(1):

        # Reconstruct the AB channels and convert back to RGB
        img_AB_reconstructed = autoencoder(img_L)
        img_rgb_orginal = getRGB(img_L, img_AB_orginal)
        img_rgb_reconstructed = getRGB(img_L, img_AB_reconstructed)

        # Plot input, output and ground truth side by side
        NUM_IMGS = 5
        fig, axs = plt.subplots(NUM_IMGS, 3)

        axs[0, 0].set_title("Input", fontsize=30)
        axs[0, 1].set_title("Output", fontsize=30)
        axs[0, 2].set_title("Ground Truth", fontsize=30)

        for i in range(NUM_IMGS):
            axs[i, 0].imshow(img_L[i], cmap="gray")
            axs[i, 0].set_axis_off()

            axs[i, 1].imshow(img_rgb_reconstructed[i])
            axs[i, 1].set_axis_off()

            axs[i, 2].imshow(img_rgb_orginal[i])
            axs[i, 2].set_axis_off()

        plt.tight_layout()
        fig.set_size_inches(15, 25)
        fig.savefig("ColoredImages.png")


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("KeyboardInterrupt received")


@@ -0,0 +1,33 @@
import tensorflow as tf
import sys

from EncoderLayers import *
from DecoderLayers import *

sys.path.append("../..")
from Colorful_Image_Colorization.model import *


def main():
    encoder_layers = EncoderLayers()
    decoder_layers = DecoderLayers()

    # Wrap the layer stacks into functional models so they can be plotted
    inputs = tf.keras.Input(shape=(256, 256, 1), name="Grey image")
    encoder = tf.keras.Model(inputs=[inputs], outputs=encoder_layers.call(inputs))

    embedding = tf.keras.Input(shape=(32, 32, 3), name="Embedding")
    decoder = tf.keras.Model(inputs=[embedding], outputs=decoder_layers.call(embedding))

    tf.keras.utils.plot_model(encoder, show_shapes=True, show_layer_names=True, to_file="EncoderLayer.png")
    tf.keras.utils.plot_model(decoder, show_shapes=True, show_layer_names=True, to_file="DecoderLayer.png")

    # Plot the architecture of the model we compare against
    ModelToCompare_layers = build_model()
    modelToCompare = tf.keras.Model(inputs=[inputs], outputs=ModelToCompare_layers.call(inputs))
    tf.keras.utils.plot_model(modelToCompare, show_shapes=True, show_layer_names=True, to_file="ModelToCompare.png")


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("KeyboardInterrupt received")

Binary file not shown (new file, 116 KiB)


@@ -0,0 +1,31 @@
import tensorflow as tf


class DecoderLayers(tf.keras.Model):
    """Upsamples the 32x32x3 embedding back to the 256x256x2 AB channels."""

    def __init__(self):
        super(DecoderLayers, self).__init__()

        self.layer_list = [
            tf.keras.layers.Conv2DTranspose(105, kernel_size=(3, 3), strides=2, padding='same', name="Conv2D_Trans_0"),
            tf.keras.layers.BatchNormalization(name="BatchNormalization_0"),
            tf.keras.layers.Activation(tf.nn.tanh, name="tanh_0"),

            tf.keras.layers.Conv2DTranspose(90, kernel_size=(3, 3), strides=2, padding='same', name="Conv2D_Trans_1"),
            tf.keras.layers.BatchNormalization(name="BatchNormalization_1"),
            tf.keras.layers.Activation(tf.nn.tanh, name="tanh_1"),

            tf.keras.layers.Conv2DTranspose(75, kernel_size=(3, 3), strides=2, padding='same', name="Conv2D_Trans_2"),
            tf.keras.layers.BatchNormalization(name="BatchNormalization_2"),
            tf.keras.layers.Activation(tf.nn.tanh, name="tanh_2"),

            # bottleneck down to the two AB color channels
            tf.keras.layers.Conv2DTranspose(2, kernel_size=(1, 1), strides=1, padding='same', name="Conv2D_Trans_3"),
            tf.keras.layers.BatchNormalization(name="BatchNormalization_3"),
            tf.keras.layers.Activation(tf.nn.tanh, name="tanh_3"),
        ]

    def call(self, x):
        for layer in self.layer_list:
            x = layer(x)
        return x

Binary file not shown (new file, 112 KiB)


@@ -0,0 +1,29 @@
import tensorflow as tf


class EncoderLayers(tf.keras.Model):
    """Downsamples the 256x256x1 grey image to a 32x32x3 embedding."""

    def __init__(self):
        super(EncoderLayers, self).__init__()

        self.layer_list = [
            tf.keras.layers.Conv2D(75, kernel_size=(3, 3), strides=2, padding='same', name="Conv2D_0"),
            tf.keras.layers.BatchNormalization(name="BatchNormalization_0"),
            tf.keras.layers.Activation(tf.nn.tanh, name="tanh_0"),

            tf.keras.layers.Conv2D(90, kernel_size=(3, 3), strides=2, padding='same', name="Conv2D_1"),
            tf.keras.layers.BatchNormalization(name="BatchNormalization_1"),
            tf.keras.layers.Activation(tf.nn.tanh, name="tanh_1"),

            tf.keras.layers.Conv2D(105, kernel_size=(3, 3), strides=2, padding='same', name="Conv2D_2"),
            tf.keras.layers.BatchNormalization(name="BatchNormalization_2"),
            tf.keras.layers.Activation(tf.nn.tanh, name="tanh_2"),

            # bottleneck down to a 3-channel embedding
            tf.keras.layers.Conv2D(3, kernel_size=(1, 1), strides=1, padding='same', name="Conv2D_3"),
            tf.keras.layers.BatchNormalization(name="BatchNormalization_3"),
            tf.keras.layers.Activation(tf.nn.tanh, name="tanh_3"),
        ]

    def call(self, x):
        for layer in self.layer_list:
            x = layer(x)
        return x

Binary file not shown (new file, 329 KiB)

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN  Plots/TrainTestLossPlot.png (new file, 30 KiB; binary file not shown)

Binary file not shown.

Binary file not shown.


@@ -0,0 +1,15 @@
Wall time,Step,Value
1647118261.250931,0,0.016809336841106415
1647122922.100936,1,0.013759356923401356
1647127578.618303,2,0.01362006925046444
1647132237.117106,3,0.014061697758734226
1647136894.337419,4,0.013472857885062695
1647141548.038196,5,0.01342787966132164
1647146209.292402,6,0.013368184678256512
1647150861.434495,7,0.013420150615274906
1647155517.411057,8,0.01329082902520895
1647160207.188101,9,0.013379388488829136
1647164926.587916,10,0.013525118120014668
1647169761.401568,11,0.01334059052169323
1647174430.458649,12,0.013532023876905441
1647179221.740573,13,0.01326887309551239


@@ -0,0 +1,15 @@
Wall time,Step,Value
1647118226.379471,0,0.01686934195458889
1647122884.759068,1,0.016268473118543625
1647127535.214221,2,0.013647115789353848
1647132192.879982,3,0.013552550226449966
1647136850.629965,4,0.01349611859768629
1647141510.180662,5,0.013455081731081009
1647146165.61258,6,0.01342522632330656
1647150823.542946,7,0.013399843126535416
1647155473.882963,8,0.013378930278122425
1647160163.770788,9,0.013358119875192642
1647164886.509832,10,0.013342463411390781
1647169721.204018,11,0.013329868204891682
1647174386.649681,12,0.013316545635461807
1647179177.404204,13,0.013304967433214188