How I run LLaMA 65B using my fork of llama at https://github.com/shawwn/llama
# Pick one: mp is the model-parallel degree (number of processes/GPUs) and
# must match the number of checkpoint shards for the model size. The later
# assignment wins, so comment out the line you don't want.
mp=1; size=7B;  # to run 7B
mp=8; size=65B; # to run 65B
for seed in $(randint 1000000)
do
  export TARGET_FOLDER=~/ml/data/llama/LLaMA
  time python3 -m torch.distributed.run --nproc_per_node $mp example.py \
    --ckpt_dir $TARGET_FOLDER/$size \
    --tokenizer_path $TARGET_FOLDER/tokenizer.model \
    --seed $seed --max_seq_len 2048 --max_gen_len 2048 --count 0 \
    | tee -a ${size}_startrek.txt
done
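Note that randint is not a standard shell utility; it presumably comes from the author's shell environment. A minimal stand-in (an assumption, not part of the original gist) that prints one random integer in [0, N) so the loop above runs as written:

randint() {
  # Hypothetical helper: print a random integer in [0, $1).
  python3 -c "import random, sys; print(random.randrange(int(sys.argv[1])))" "$1"
}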