diff --git a/1.9-gpu/example.py b/1.9-gpu/example.py
new file mode 100644
index 0000000..f0915c2
--- /dev/null
+++ b/1.9-gpu/example.py
@@ -0,0 +1,18 @@
+import tensorflow as tf
+mnist = tf.keras.datasets.mnist
+
+(x_train, y_train),(x_test, y_test) = mnist.load_data()
+x_train, x_test = x_train / 255.0, x_test / 255.0
+
+model = tf.keras.models.Sequential([
+  tf.keras.layers.Flatten(),
+  tf.keras.layers.Dense(512, activation=tf.nn.relu),
+  tf.keras.layers.Dropout(0.2),
+  tf.keras.layers.Dense(10, activation=tf.nn.softmax)
+])
+model.compile(optimizer='adam',
+              loss='sparse_categorical_crossentropy',
+              metrics=['accuracy'])
+
+model.fit(x_train, y_train, epochs=5)
+model.evaluate(x_test, y_test)
diff --git a/1.9-gpu/script.sh b/1.9-gpu/script.sh
index 6b8eb47..1044233 100644
--- a/1.9-gpu/script.sh
+++ b/1.9-gpu/script.sh
@@ -1,16 +1,15 @@
 #!/bin/bash
 #SBATCH --nodes=1
 #SBATCH --time=1:0:0
 #SBATCH --partition=gpu
 #SBATCH --gres=gpu:1
 #SBATCH --qos=gpu
 source /ssoft/spack/bin/slmodules.sh -s x86_E5v2_Mellanox_GPU
-source /scratch/nvarini/tensorflow-1.9.0-pip3/bin/activate
+source /home/nvarini/tensorflow-pip-deneb/1.9-gpu/tensorflow-1.9/bin/activate
-module load gcc/6.4.0 cuda/9.1.85 openmpi/3.0.1-cuda python cudnn
-export LD_LIBRARY_PATH=/usr/lib64:$LD_LIBRARY_PATH
-srun python tf_cnn_benchmarks.py --batch_size=64 --model=resnet50 --num_gpus=1 --data_format=NCHW --variable_update=replicated --local_parameter_device=gpu --num_batches=3000
+module load gcc/6.4.0 cuda/9.1.85 python cudnn
+srun python3 example.py
 deactivate
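A minimal sketch, not part of the patch: assuming the same TensorFlow 1.9 virtualenv and loaded cuda/cudnn modules, a small check (hypothetical helper, here called check_gpu.py) that the allocated GPU is actually visible to TensorFlow before running example.py. Both calls are standard TF 1.x API.

# check_gpu.py -- hypothetical helper, not included in this commit
import tensorflow as tf
from tensorflow.python.client import device_lib

# True only if this TensorFlow build has CUDA support and at least one GPU is visible
print("GPU available:", tf.test.is_gpu_available())

# List every device TensorFlow can place ops on, e.g. /device:GPU:0 alongside the CPU
print([d.name for d in device_lib.list_local_devices()])

It could be run the same way as example.py inside the job script (srun python3 check_gpu.py) to confirm the gres=gpu:1 allocation is seen inside the job step.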