#!/bin/bash

# Job name (change "test" to something descriptive for your run):
#SBATCH --job-name=test
#
# Project:
#SBATCH --account=nn9447k
#
# Wall clock limit:
#SBATCH --time=1:00:00
#
# Max memory usage per CPU core (in MB):
#SBATCH --mem-per-cpu=3000
#
# Number of CPU cores per task:
#SBATCH --cpus-per-task=4
#
# GPU job on the accel partition, requesting 1 GPU:
#SBATCH --partition=accel --gres=gpu:1

## Set up job environment:
module purge
module use -a /projects/nlpl/software/modulefiles
module load nlpl-opennmt-py
set -o errexit # exit on errors

# Only GPU 0 is visible to the job; export so the Python process sees it.
export CUDA_VISIBLE_DEVICES=0
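# Optional sanity check (assuming nvidia-smi is available on the accel nodes):
# log which GPU the job was allocated before training starts.
nvidia-smi || true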

ROOT_DIR=/projects/nlpl/software/opennmt-py/0.2.1/scripts
DATA_DIR=/usit/abel/u1/gtang/corpus/sven
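
# The -data prefix passed to train.py below expects shards produced by
# OpenNMT-py's preprocess.py (sven.word.train.1.pt, sven.word.valid.1.pt,
# sven.word.vocab.pt). For reference, a rough sketch of that step is shown
# here, assuming preprocess.py sits next to train.py in $ROOT_DIR; the raw
# text file names are placeholders. -share_vocab is needed because training
# uses -share_embeddings.
#python $ROOT_DIR/preprocess.py \
#  -train_src $DATA_DIR/train.src -train_tgt $DATA_DIR/train.tgt \
#  -valid_src $DATA_DIR/valid.src -valid_tgt $DATA_DIR/valid.tgt \
#  -share_vocab \
#  -save_data $DATA_DIR/sven.word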
## Copy input files to the work directory (only needed when running from $SCRATCH):
#cp $DATA_DIR/sven.word.train.1.pt $DATA_DIR/sven.word.valid.1.pt $DATA_DIR/sven.word.vocab.pt $SCRATCH

## If writing results to $SCRATCH, mark them to be copied back to the submit directory:
#chkfile ResultDir

## Do some work:
#cd $SCRATCH
OUTPUT_DIR=/usit/abel/u1/gtang
# Train the word-level model: 2-layer bidirectional LSTM encoder, LSTM decoder,
# shared embeddings, general (Luong) attention, Adam optimizer.
python $ROOT_DIR/train.py \
  -data $DATA_DIR/sven.word \
  -save_model $OUTPUT_DIR/model_word \
  -share_embeddings \
  -share_decoder_embeddings \
  -src_word_vec_size 128 \
  -tgt_word_vec_size 128 \
  -encoder_type brnn \
  -decoder_type rnn \
  -rnn_type LSTM \
  -layers 2 \
  -rnn_size 128 \
  -bridge \
  -global_attention general \
  -global_attention_function softmax \
  -save_checkpoint_steps 400 \
  -keep_checkpoint 6 \
  -gpu_rank 0 \
  -batch_type tokens \
  -batch_size 200 \
  -optim adam \
  -learning_rate 0.01 \
  -valid_steps 400 \
  -train_steps 5200 \
  -dropout 0.1 \
  -report_every 20 
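
# Optional follow-up once training finishes: produce translations with
# translate.py from the same OpenNMT-py install. This is only a sketch; the
# checkpoint name assumes OpenNMT-py's <save_model>_step_<N>.pt pattern, and
# the test file and output path are placeholders.
#python $ROOT_DIR/translate.py \
#  -model $OUTPUT_DIR/model_word_step_5200.pt \
#  -src $DATA_DIR/test.src \
#  -output $OUTPUT_DIR/pred_word.txt \
#  -gpu 0 \
#  -beam_size 5 \
#  -replace_unk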
 


