Commit 68f45ceb authored by Gaëtan Caillaut

train semi-transforming models

parent dc89bf5a
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name mlm-semitrans
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t1/train.csv"
DEV="data/cleaned/t1/dev.csv"
TEST="data/cleaned/t1/test.csv"
TOKENIZER="output/tokenizer.json"
OUT_DIR="models/cleaned"
BS=512
DEVICE="cuda"
LOGDIR="runs/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert-oscar/minibert:${PYTHONPATH}"
set -x
set -e
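# Sweep over resume offsets (E), embedding sizes (D), attention types and
# positional-encoding schemes. seq -f "%05g" yields zero-padded values
# (00000, 00010, ..., 00040) matching the checkpoint-<E>.tar file names;
# each invocation trains 10 more epochs, resuming from the matching
# checkpoint when E > 0. The 10# prefix below forces base-10 arithmetic,
# since zero-padded numbers such as 00010 would otherwise be parsed as octal.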
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                TB_DIR="${LOGDIR}/${RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${RUN_NAME}/checkpoint-${E}.tar"
                    python train.py mlm ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --bs ${BS} --epochs 10 --attention ${ATT} --position ${POS} --device ${DEVICE} --checkpoint ${CHECKPOINT} --logdir ${TB_DIR}
                else
                    python train.py mlm ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --bs ${BS} --epochs 10 --attention ${ATT} --position ${POS} --device ${DEVICE} --logdir ${TB_DIR}
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name mlm-semitrans-lemmatized
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/lemmatized/t1/train.csv"
DEV="data/lemmatized/t1/dev.csv"
TEST="data/lemmatized/t1/test.csv"
TOKENIZER="output/tokenizer_lemmatized.json"
OUT_DIR="models/lemmatized"
BS=512
DEVICE="cuda"
LOGDIR="runs/lemmatized"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert-oscar/minibert:${PYTHONPATH}"
set -x
set -e
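# Same MLM sweep as the cleaned-corpus script, run on the lemmatized corpus.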
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                TB_DIR="${LOGDIR}/${RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${RUN_NAME}/checkpoint-${E}.tar"
                    python train.py mlm ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --bs ${BS} --epochs 10 --attention ${ATT} --position ${POS} --device ${DEVICE} --checkpoint ${CHECKPOINT} --logdir ${TB_DIR}
                else
                    python train.py mlm ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --bs ${BS} --epochs 10 --attention ${ATT} --position ${POS} --device ${DEVICE} --logdir ${TB_DIR}
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t1-semitrans
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t1/train.csv"
DEV="data/cleaned/t1/dev.csv"
TEST="data/cleaned/t1/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/cleaned"
OUT_DIR="models/t1/cleaned"
BS=512
DEVICE="cuda"
LOGDIR="runs/t1/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"
set -x
set -e
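# Fine-tune each pretrained MLM model (minibert-model.pt) on task t1. The MLM
# run names carry a _gelu_ tag that the t1 run names drop.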
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                T1_RUN_NAME="d${D}_${ATT}_${POS}_norm"
                TB_DIR="${LOGDIR}/${T1_RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${T1_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t1 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT}
                else
                    python train.py t1 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR}
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t1-semitrans-frozen
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t1/train.csv"
DEV="data/cleaned/t1/dev.csv"
TEST="data/cleaned/t1/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/cleaned"
OUT_DIR="models/t1/cleaned"
BS=512
DEVICE="cuda"
LOGDIR="runs/t1/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"
set -x
set -e
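# Same t1 fine-tuning, but --freeze-attention keeps the pretrained attention
# weights fixed; run names take a _frozen suffix so these runs do not clobber
# the unfrozen ones in the shared OUT_DIR.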
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                T1_RUN_NAME="d${D}_${ATT}_${POS}_norm_frozen"
                TB_DIR="${LOGDIR}/${T1_RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${T1_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t1 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT} --freeze-attention
                else
                    python train.py t1 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --freeze-attention
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t1_fs-semitrans
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t1/train.csv"
DEV="data/cleaned/t1/dev.csv"
TEST="data/cleaned/t1/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/cleaned"
OUT_DIR="models/t1_fs/cleaned"
BS=512
DEVICE="cuda"
LOGDIR="runs/t1_fs/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"
set -x
set -e
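# Unlike the t1 fine-tuning scripts, no pretrained minibert-model.pt is passed
# here: t1-fs presumably trains on t1 from scratch ("fs" read as "from
# scratch"; the train.py subcommand name is the only hint).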
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                T1_RUN_NAME="d${D}_${ATT}_${POS}_norm"
                TB_DIR="${LOGDIR}/${T1_RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${T1_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t1-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT}
                else
                    python train.py t1-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR}
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t1_fs-semitrans-lemmatized
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/lemmatized/t1/train.csv"
DEV="data/lemmatized/t1/dev.csv"
TEST="data/lemmatized/t1/test.csv"
TOKENIZER="output/tokenizer_lemmatized.json"
PRETRAINED_DIR="models/lemmatized"
OUT_DIR="models/t1_fs/lemmatized"
BS=512
DEVICE="cuda"
LOGDIR="runs/t1_fs/lemmatized"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"
set -x
set -e
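# Same from-scratch t1 sweep, run on the lemmatized corpus.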
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                T1_RUN_NAME="d${D}_${ATT}_${POS}_norm"
                TB_DIR="${LOGDIR}/${T1_RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${T1_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t1-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT}
                else
                    python train.py t1-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR}
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t1-semitrans-lemmatized
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/lemmatized/t1/train.csv"
DEV="data/lemmatized/t1/dev.csv"
TEST="data/lemmatized/t1/test.csv"
TOKENIZER="output/tokenizer_lemmatized.json"
PRETRAINED_DIR="models/lemmatized"
OUT_DIR="models/t1/lemmatized"
BS=512
DEVICE="cuda"
LOGDIR="runs/t1/lemmatized"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"
set -x
set -e
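# Fine-tune the lemmatized MLM models on t1 (lemmatized corpus).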
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                T1_RUN_NAME="d${D}_${ATT}_${POS}_norm"
                TB_DIR="${LOGDIR}/${T1_RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${T1_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t1 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT}
                else
                    python train.py t1 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR}
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t1-semitrans-lemmatized-frozen
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/lemmatized/t1/train.csv"
DEV="data/lemmatized/t1/dev.csv"
TEST="data/lemmatized/t1/test.csv"
TOKENIZER="output/tokenizer_lemmatized.json"
PRETRAINED_DIR="models/lemmatized"
OUT_DIR="models/t1/lemmatized"
BS=512
DEVICE="cuda"
LOGDIR="runs/t1/lemmatized"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"
set -x
set -e
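# Lemmatized t1 fine-tuning with --freeze-attention; the _frozen run names
# keep these runs separate from the unfrozen ones in the same OUT_DIR.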
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                T1_RUN_NAME="d${D}_${ATT}_${POS}_norm_frozen"
                TB_DIR="${LOGDIR}/${T1_RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${T1_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t1 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT} --freeze-attention
                else
                    python train.py t1 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --freeze-attention
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t2-semitrans
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t2/train.csv"
DEV="data/cleaned/t2/dev.csv"
TEST="data/cleaned/t2/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/cleaned"
OUT_DIR="models/t2/cleaned"
BS=512
DEVICE="cuda"
LOGDIR="runs/t2/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"
set -x
set -e
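# Fine-tune the pretrained MLM models on task t2 (cleaned corpus).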
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                T2_RUN_NAME="d${D}_${ATT}_${POS}_norm"
                TB_DIR="${LOGDIR}/${T2_RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${T2_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t2 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT}
                else
                    python train.py t2 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR}
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t2-semitrans-frozen
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t2/train.csv"
DEV="data/cleaned/t2/dev.csv"
TEST="data/cleaned/t2/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/cleaned"
OUT_DIR="models/t2/cleaned"
BS=512
DEVICE="cuda"
LOGDIR="runs/t2/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done
export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"
set -x
set -e
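# t2 fine-tuning with --freeze-attention; run names take a _frozen suffix.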
for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64 96 128; do
        for ATT in "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm"
                T2_RUN_NAME="d${D}_${ATT}_${POS}_norm_frozen"
                TB_DIR="${LOGDIR}/${T2_RUN_NAME}"
                if ((10#$E > 0)); then
                    CHECKPOINT="${OUT_DIR}/${T2_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t2 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT} --freeze-attention
                else
                    python train.py t2 ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} "${PRETRAINED_DIR}/${MLM_RUN_NAME}/minibert-model.pt" -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --freeze-attention
                fi
            done
        done
    done
done
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t2_fs-semitrans
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t2/train.csv"
DEV="data/cleaned/t2/dev.csv"
TEST="data/cleaned/t2/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/cleaned"
OUT_DIR="models/t2_fs/cleaned"
BS=512
DEVICE="cuda"
LOGDIR="runs/t2_fs/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done