Commit da7fb2e8 authored by Gaëtan Caillaut

support for training h1d2 models

parent 3074b260
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name mlm-h1d2
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
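# MLM pretraining of minibert h1d2 models (--height 1 --depth 2) on the cleaned corpus,
# sweeping embedding size (16, 32, 64), attention type and positional encoding.
# The outer E loop (00000, 00010, ..., 00040) reruns the sweep, resuming each
# configuration from its checkpoint-${E}.tar whenever E > 0.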
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t1/train.csv"
DEV="data/cleaned/t1/dev.csv"
TEST="data/cleaned/t1/test.csv"
TOKENIZER="output/tokenizer.json"
OUT_DIR="models/cleaned"
BS=256
DEVICE="cuda"
LOGDIR="runs/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done

export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"

set -x
set -e

for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64; do
        for ATT in "self-attention" "non-transforming" "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm_h1d2"
                TB_DIR="${LOGDIR}/${RUN_NAME}"

                if ((10#$E>0)); then
                    CHECKPOINT="${OUT_DIR}/${RUN_NAME}/checkpoint-${E}.tar"
                    python train.py mlm ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --bs ${BS} --epochs 10 --attention ${ATT} --position ${POS} --device ${DEVICE} --checkpoint ${CHECKPOINT} --logdir ${TB_DIR} --height 1 --depth 2
                else
                    python train.py mlm ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --bs ${BS} --epochs 10 --attention ${ATT} --position ${POS} --device ${DEVICE} --logdir ${TB_DIR} --height 1 --depth 2
                fi
            done
        done
    done
done
\ No newline at end of file
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name mlm-lemmatized
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
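# Same MLM pretraining sweep as the cleaned-corpus script, run on the lemmatized corpus
# with the lemmatized tokenizer; models and logs go under models/lemmatized and runs/lemmatized.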
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/lemmatized/t1/train.csv"
DEV="data/lemmatized/t1/dev.csv"
TEST="data/lemmatized/t1/test.csv"
TOKENIZER="output/tokenizer_lemmatized.json"
OUT_DIR="models/lemmatized"
BS=256
DEVICE="cuda"
LOGDIR="runs/lemmatized"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done

export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"

set -x
set -e

for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64; do
        for ATT in "self-attention" "non-transforming" "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm_h1d2"
                TB_DIR="${LOGDIR}/${RUN_NAME}"

                if ((10#$E>0)); then
                    CHECKPOINT="${OUT_DIR}/${RUN_NAME}/checkpoint-${E}.tar"
                    python train.py mlm ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --bs ${BS} --epochs 10 --attention ${ATT} --position ${POS} --device ${DEVICE} --checkpoint ${CHECKPOINT} --logdir ${TB_DIR} --height 1 --depth 2
                else
                    python train.py mlm ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --bs ${BS} --epochs 10 --attention ${ATT} --position ${POS} --device ${DEVICE} --logdir ${TB_DIR} --height 1 --depth 2
                fi
            done
        done
    done
done
\ No newline at end of file
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t1_fs-h1d2
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
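# t1-fs training of h1d2 models on the cleaned t1 data, with the same hyper-parameter
# sweep and checkpoint-resume pattern as the MLM scripts. PRETRAINED_DIR is defined
# below but not used by the train.py call.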
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t1/train.csv"
DEV="data/cleaned/t1/dev.csv"
TEST="data/cleaned/t1/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/cleaned"
OUT_DIR="models/t1_fs/cleaned"
BS=256
DEVICE="cuda"
LOGDIR="runs/t1_fs/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done

export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"

set -x
set -e

for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64; do
        for ATT in "self-attention" "non-transforming" "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm_h1d2"
                T1_RUN_NAME="d${D}_${ATT}_${POS}_norm_h1d2"
                TB_DIR="${LOGDIR}/${T1_RUN_NAME}"

                if ((10#$E>0)); then
                    CHECKPOINT="${OUT_DIR}/${T1_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t1-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT} --height 1 --depth 2
                else
                    python train.py t1-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --height 1 --depth 2
                fi
            done
        done
    done
done
\ No newline at end of file
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t1_fs-lemmatized-h1d2
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
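# t1-fs training of h1d2 models on the lemmatized t1 data; mirrors the cleaned-corpus
# t1-fs script, writing under models/t1_fs/lemmatized and runs/t1_fs/lemmatized.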
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/lemmatized/t1/train.csv"
DEV="data/lemmatized/t1/dev.csv"
TEST="data/lemmatized/t1/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/lemmatized"
OUT_DIR="models/t1_fs/lemmatized"
BS=256
DEVICE="cuda"
LOGDIR="runs/t1_fs/lemmatized"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done

export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"

set -x
set -e

for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64; do
        for ATT in "self-attention" "non-transforming" "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm_h1d2"
                T1_RUN_NAME="d${D}_${ATT}_${POS}_norm_h1d2"
                TB_DIR="${LOGDIR}/${T1_RUN_NAME}"

                if ((10#$E>0)); then
                    CHECKPOINT="${OUT_DIR}/${T1_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t1-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT} --height 1 --depth 2
                else
                    python train.py t1-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --height 1 --depth 2
                fi
            done
        done
    done
done
\ No newline at end of file
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t2_fs-h1d2
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
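# t2-fs training of h1d2 models on the cleaned t2 data, with the same sweep and
# checkpoint-resume pattern as the t1-fs scripts.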
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/cleaned/t2/train.csv"
DEV="data/cleaned/t2/dev.csv"
TEST="data/cleaned/t2/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/cleaned"
OUT_DIR="models/t2_fs/cleaned"
BS=256
DEVICE="cuda"
LOGDIR="runs/t2_fs/cleaned"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done

export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"

set -x
set -e

for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64; do
        for ATT in "self-attention" "non-transforming" "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm_h1d2"
                T2_RUN_NAME="d${D}_${ATT}_${POS}_norm_h1d2"
                TB_DIR="${LOGDIR}/${T2_RUN_NAME}"

                if ((10#$E>0)); then
                    CHECKPOINT="${OUT_DIR}/${T2_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t2-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT} --height 1 --depth 2
                else
                    python train.py t2-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --height 1 --depth 2
                fi
            done
        done
    done
done
\ No newline at end of file
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres gpu:rtx6000:1
#SBATCH --job-name t2_fs-lemmatized-h1d2
#SBATCH --time 10-0
#SBATCH --mem 20G
#SBATCH -o logs/out-%j.txt
#SBATCH -e logs/err-%j.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gaetan.caillaut@univ-lemans.fr
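# t2-fs training of h1d2 models on the lemmatized t2 data; mirrors the cleaned-corpus
# t2-fs script.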
eval "$(conda shell.bash hook)"
conda activate polysemy
TRAIN="data/lemmatized/t2/train.csv"
DEV="data/lemmatized/t2/dev.csv"
TEST="data/lemmatized/t2/test.csv"
TOKENIZER="output/tokenizer.json"
PRETRAINED_DIR="models/lemmatized"
OUT_DIR="models/t2_fs/lemmatized"
BS=256
DEVICE="cuda"
LOGDIR="runs/t2_fs/lemmatized"
for d in ${OUT_DIR} ${LOGDIR}; do
    if [ ! -d ${d} ]; then
        mkdir -p ${d}
    fi
done

export PYTHONPATH="/lium/raid01_b/gcaillaut/polysemy/minibert:${PYTHONPATH}"

set -x
set -e

for E in $(seq -f "%05g" 0 10 40); do
    for D in 16 32 64; do
        for ATT in "self-attention" "non-transforming" "semi-transforming"; do
            for POS in "none" "fixed" "trained"; do
                MLM_RUN_NAME="d${D}_${ATT}_${POS}_gelu_norm_h1d2"
                T2_RUN_NAME="d${D}_${ATT}_${POS}_norm_h1d2"
                TB_DIR="${LOGDIR}/${T2_RUN_NAME}"

                if ((10#$E>0)); then
                    CHECKPOINT="${OUT_DIR}/${T2_RUN_NAME}/checkpoint-${E}.tar"
                    python train.py t2-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --checkpoint ${CHECKPOINT} --height 1 --depth 2
                else
                    python train.py t2-fs ${TRAIN} ${TEST} ${DEV} ${TOKENIZER} -o ${OUT_DIR} -d ${D} --attention ${ATT} --position ${POS} --epochs 10 --bs ${BS} --device ${DEVICE} --logdir ${TB_DIR} --height 1 --depth 2
                fi
            done
        done
    done
done
\ No newline at end of file
@@ -30,7 +30,7 @@ def parse_attention(s):
 def run_name_from_params(args):
-    return "_".join([
+    s = "_".join([
         f"d{args.d}",
         args.attention.strip().lower(),
         args.position.strip().lower(),
@@ -38,6 +38,10 @@ def run_name_from_params(args):
         "nonorm" if args.dont_normalize else "norm"
     ])
+    if args.height != 1 or args.depth != 1:
+        s = f"{s}_h{args.height}d{args.depth}"
+    return s
 def t1_run_name_from_params(args):
     s = "_".join([
@@ -54,6 +58,9 @@ def t1_run_name_from_params(args):
     if freeze_attention:
         s = f"{s}_frozen"
+    if args.height != 1 or args.depth != 1:
+        s = f"{s}_h{args.height}d{args.depth}"
     return s
@@ -72,17 +79,20 @@ def t2_run_name_from_params(args):
     if freeze_attention:
         s = f"{s}_frozen"
+    if args.height != 1 or args.depth != 1:
+        s = f"{s}_h{args.height}d{args.depth}"
     return s
-def mlm_model_from_params(d, attention, position, tokenizer, max_seq_size, mask_token, pad_token, activation, device, checkpoint_path=None):
+def mlm_model_from_params(d, attention, position, tokenizer, max_seq_size, mask_token, pad_token, activation, device, checkpoint_path=None, height=1, depth=1):
     if checkpoint_path is None:
         vocabulary = tokenizer.get_vocab()
         configuration_dict = dict(
             # Minibert
             vocabulary=vocabulary,
-            depth=1,
-            height=1,
+            depth=depth,
+            height=height,
             mask_idx=tokenizer.token_to_id(mask_token),
             mask_token=mask_token,
             pad_idx=tokenizer.token_to_id(pad_token),
@@ -126,14 +136,14 @@ def mlm_model_from_checkpoint(checkpoint_path, device="cpu"):
     return model, optimizer, prev_epoch, configuration_dict
-def t1_model_from_params(pretrained_path, d, attention, position, tokenizer, max_seq_size, mask_token, pad_token, device, checkpoint_path=None):
+def t1_model_from_params(pretrained_path, d, attention, position, tokenizer, max_seq_size, mask_token, pad_token, device, checkpoint_path=None, height=1, depth=1):
     if checkpoint_path is None:
         vocabulary = tokenizer.get_vocab()
         configuration_dict = dict(
             # Minibert
             vocabulary=vocabulary,
-            depth=1,
-            height=1,
+            depth=depth,
+            height=height,
             mask_idx=tokenizer.token_to_id(mask_token),
             mask_token=mask_token,
             pad_idx=tokenizer.token_to_id(pad_token),
@@ -180,14 +190,14 @@ def t1_model_from_checkpoint(checkpoint_path, device="cpu"):
     return model, optimizer, prev_epoch, configuration_dict
-def t2_model_from_params(pretrained_path, d, attention, position, tokenizer, max_seq_size, mask_token, pad_token, device, checkpoint_path=None):
+def t2_model_from_params(pretrained_path, d, attention, position, tokenizer, max_seq_size, mask_token, pad_token, device, checkpoint_path=None, height=1, depth=1):
     if checkpoint_path is None:
         vocabulary = tokenizer.get_vocab()
         configuration_dict = dict(
             # Minibert
             vocabulary=vocabulary,
-            depth=1,
-            height=1,
+            depth=depth,
+            height=height,
             mask_idx=tokenizer.token_to_id(mask_token),
             mask_token=mask_token,
             pad_idx=tokenizer.token_to_id(pad_token),
@@ -266,7 +276,7 @@ def train_mlm(args):
         test_dataset, collate_fn=mlm_collater, batch_size=args.bs, pin_memory=pin_memory)
     model, optimizer, prev_epoch, config_dict = mlm_model_from_params(
-        args.d, attention_type, position_type, tokenizer, max_seq_size, mask_token, pad_token, args.activation, device, checkpoint_path=args.checkpoint)
+        args.d, attention_type, position_type, tokenizer, max_seq_size, mask_token, pad_token, args.activation, device, checkpoint_path=args.checkpoint, height=args.height, depth=args.depth)
     run_name = run_name_from_params(args)
     if args.logdir is None:
@@ -1274,6 +1284,8 @@ if __name__ == "__main__":
     mlm_parser.add_argument("-e", "--epochs", type=int, default=100)
     mlm_parser.add_argument("--attention", type=str, default="self-attention")
     mlm_parser.add_argument("--position", type=str, default="fixed")
+    mlm_parser.add_argument("--height", type=int, default=1)
+    mlm_parser.add_argument("--depth", type=int, default=1)
     mlm_parser.add_argument("--dont-normalize", action="store_true")
     mlm_parser.add_argument("--activation", type=str, default="gelu")
     mlm_parser.add_argument("--device", type=str, default="cpu")