zh-tw-llm-ta01-pythia-1b-ta8000-v1-a_2_lora_instruction_tune-a100-t00t1-4d09e9

This model is part of the zh-tw-llm project.

Training was stopped early at approximately epoch 1.31 (global_step 2660).

  • Base model: /llm_training_data/zh-tw-llm/models/zh-tw-llm-ta01-pythia-1b-ta8000-v1-a_1_embeddings-a100-t02-3d435e
  • Tokenizer: zh-tw-pythia-tokenizer-a8000-v1
  • Vocab size: 58113
  • Training stage: a_2_lora_instruction_tune
  • Dataset used: zh-tw-pythia-ta8000-v1-it1-sg-001
  • Full config (the usage sketches after the JSON below show how some of these settings map to code):
    {"project_name": "zh-tw-llm", "group_name": "ta01", "hf_user_or_org_name": "zh-tw-llm-dv", "base_tokenizer_name": "EleutherAI/pythia-70m", "base_model_name": "EleutherAI/pythia-1b", "tokenizer_name": "zh-tw-pythia-tokenizer-a8000-v1", "tokenizer": {"build_with": "word_frequency_list", "tokens_to_add": 8000, "word_frequency_list_settings": {"word_frequency_list_name": "zetavg/tw-sinica-corpus-word-frequency", "include_words": ["。", ",", "、", "?", "!", ";", ":", "……", "~", "「", "」", "『", "』", "【", "】", "〖", "〗", "(", ")", "〔", "〕", "[", "]", "{", "}", "《", "》", "〈", "〉", "——", "──", "-", "−", "_", "・", ".", "·", "/", "\", "|", "<", ">"], "replace_rules": [{"match": {"regex": "�"}, "replace": null}, {"match": {"pos": ["Nb", "FW", null]}, "replace": null, "except": ["奧運", "中共", "國民黨", "民進黨", "新黨", "共產黨", "媽祖", "耶穌"]}, {"match": {"regex": ["^[A-Za-z0-9﹒• ]+$", "^[零一二兩三四五六七八九十廿卅百千萬億兆壹貳參肆伍陸柒捌玖拾佰仟0-9﹒•]{2,}$", "^([零一二兩三四五六七八九十廿卅百千萬億兆壹貳參肆伍陸柒捌玖拾佰仟0-9﹒•]+)$", "^[第數][零一二兩三四五六七八九十百千萬億兆0-9﹒•]+$", "^[零一二兩三四五六七八九十廿卅百千萬億兆0-9﹒•]+分之[零一二兩三四五六七八九十廿卅百千萬億兆0-9﹒•]+$", "^[零一二兩三四五六七八九十廿卅百千萬億兆0-9﹒•]+[多餘來幾成次年月日天時分點世代歲起段樓%]$", "^[零一二三四五六七八九十廿卅0-9]+(月份|年代?|世紀|學?年度|年級)$", "^(星期|週|周)[一二三四五六日]$"]}, "replace": null, "except": ["十分", "一起", "一點", "一時", "千萬", "兩三", "百分之百"]}, {"match": {"pos": "VHC", "regex": "^(.{2,})化$"}, "sub": "\\1"}, {"match": "高爾夫球場", "replace": "高爾夫"}, {"match": {"regex": "^(.+球)場$"}, "sub": "\\1"}, {"match": {"pos": "Nc", "regex": "^(.{2,})園區$"}, "sub": "\\1"}, {"match": {"pos": "Nc", "regex": "^(.{2,})[鄉鎮縣市區]$"}, "sub": "\\1"}, {"match": {"pos": "Nc", "regex": "^(.{2,})[界院部會署局館系所]$"}, "sub": "\\1", "except": ["委員會", "研究所", "中研院", "國科會", "資策會", "經建會", "工研院", "電信總局", "鎮公所", "事務所", "交易所", "農委會", "鄉公所", "地檢署", "警分局", "派出所", "托兒所", "消基會", "文建會", "兩廳院", "陸委會", "市議會"]}, {"match": {"pos": "Na", "regex": "^(.{2,})人$"}, "sub": "\\1", "except": ["年輕人", "負責人", "投資人", "候選人", "一家人", "當地人", "製作人"]}, {"match": {"pos": "Na", "regex": "^(.{2,3})學?家$"}, "sub": "\\1", "except": ["女人家", "婦人家", "新儒家", "窮人家", "縱橫家", "老人家", "老東家", "闊人家", "大戶人家", "婦道人家", "小戶人家", "水上人家", "諸子百家"]}, {"match": {"pos": "Na", "regex": "^副?總?([^副總]{2,})師$"}, "sub": "\\1", "except": ["中醫師", "囝仔師", "正機師", "準教師", "獸醫師", "班導師", "練馬師", "總舖師", "老像師", "新三十師", "至聖先師", "音樂大師"]}, {"match": {"pos": "Na", "regex": "^[原前]?(?:代|代理)?副?總?([^前代副總議警里首院部署局廳司處科組課股]{2,})[院部署局廳司處科組課股]?次?長$"}, "sub": "\\1", "except": ["董事長", "理事長", "秘書長", "執行長", "分局長", "縣市長", "一技之長", "省市長", "負成長", "高成長", "大家長", "小組長", "區組長", "低成長", "偵一組長", "停管隊長", "考選部長", "年增長", "正成長", "支店長", "公賣局長", "中宣部長", "小市長"]}, {"match": {"pos": "Na", "regex": "^副?總?正?([^副總正議委人隊]{2,})[委人隊]?員$"}, "sub": "\\1", "except": ["主跑員", "乘務員", "佐理員", "共黨員", "外務員", "從業員", "特派員", "義服員", "銜道員", "啦啦隊員", "指服團員"]}, {"match": {"pos": "Na", "regex": "^副(.{2,})$"}, "sub": "\\1", "except": ["副作用"]}, {"match": "一剎那", "replace": "剎那"}, {"match": "不能夠", "replace": "能夠"}, {"match": "光碟機", "replace": "光碟"}, {"match": "共和國", "replace": "共和"}, {"match": "原住民", "replace": "住民"}, {"match": "吸引力", "replace": "吸引"}, {"match": "國際性", "replace": "國際"}, {"match": "垃圾場", "replace": "垃圾"}, {"match": "大規模", "replace": "規模"}, {"match": "廢棄物", "replace": "廢棄"}, {"match": "愛滋病", "replace": "愛滋"}, {"match": "成交量", "replace": "成交"}, {"match": "接觸到", "replace": "接觸"}, {"match": "掩埋場", "replace": "掩埋"}, {"match": "正確率", "replace": "正確"}, {"match": "清華園", "replace": "清華"}, {"match": "聯誼會", "replace": "聯誼"}, {"match": "調查站", "replace": "調查"}, {"match": "轉換成", "replace": "轉換"}, {"match": "開放式", "replace": "開放"}, {"match": "開玩笑", "replace": "玩笑"}, {"match": 
"陽明山", "replace": "陽明"}, {"match": "雜貨店", "replace": "雜貨"}, {"match": "電視機", "replace": "電視"}, {"match": "高品質", "replace": "品質"}, {"match": "鬆弛法", "replace": "鬆弛"}, {"match": "共產主義", "replace": "共產"}, {"match": "資本主義", "replace": "資本"}, {"match": "微處理器", "replace": "處理器"}, {"match": "有線電視", "replace": "電視"}, {"match": "隨選視訊", "replace": "視訊"}, {"match": "電信總局", "replace": "總局"}, {"match": "進一步", "replace": ["一步", "進一步"]}, {"match": "差不多", "replace": ["不多", "差不多"]}, {"match": "忍不住", "replace": ["不住", "忍不住"]}, {"match": "不見得", "replace": ["見得", "不見得"]}, {"match": "有助於", "replace": ["助於", "有助於"]}, {"match": "舊金山", "replace": ["金山", "舊金山"]}, {"match": "大躍進", "replace": ["躍進", "大躍進"]}, {"match": "半導體", "replace": ["導體", "半導體"]}, {"match": "總幹事", "replace": ["幹事", "總幹事"]}, {"match": "兩廳院", "replace": ["廳院", "兩廳院"]}]}}, "training": {"a_1_embeddings": {"run_name_suffix": "a100-t02", "max_tokens_length": 512, "dataset_name": "zh-tw-pythia-ta8000-v1-e1-tr_sg-001-c512", "dataset": {"build_with": ["translations", "sharegpt"], "preview_length": 128, "translations_settings": {"source_dataset": "zetavg/coct-en-zh-tw-translations-twp-300k", "lang_1_key": "en", "lang_2_key": "ch", "templates": ["English: {lang_1}\nChinese: {lang_2}", "Chinese: {lang_2}\nEnglish: {lang_1}"], "rows_limit": 100000, "test_size": 100, "test_split_seed": 42}, "sharegpt_settings": {"source_dataset": "zetavg/ShareGPT-Processed", "train_on_inputs": false, "languages": [{"en": 0.4}, "zh_Hant"], "rows_limit": 8000, "test_size": 0.02, "test_split_seed": 42, "test_rows_limit": 100}}, "only_train_parameters_matching": ["embed"], "training_arguments": {"num_train_epochs": 1, "auto_find_batch_size": true, "gradient_accumulation_steps": 1, "optim": "adamw_torch", "learning_rate": 5e-05, "lr_scheduler_type": "constant", "warmup_steps": 100, "logging_steps": 10, "eval_steps": 500, "save_steps": 5000, "save_total_limit": 3}}, "b_1_embeddings_and_attention": {"run_name_suffix": "a100-t02", "dataset": {"same_as": "a_1_embeddings"}, "only_train_parameters_matching": ["embed", "attention"], "training_arguments": {"num_train_epochs": 1, "auto_find_batch_size": true, "gradient_accumulation_steps": 1, "optim": "adamw_torch", "learning_rate": 5e-05, "lr_scheduler_type": "constant", "warmup_steps": 100, "logging_steps": 10, "eval_steps": 500, "save_steps": 5000, "save_total_limit": 3}}, "c_1_all_params": {"run_name_suffix": "a100-t02", "dataset": {"same_as": "a_1_embeddings"}, "training_arguments": {"num_train_epochs": 1, "auto_find_batch_size": true, "gradient_accumulation_steps": 1, "optim": "adamw_torch", "learning_rate": 5e-05, "lr_scheduler_type": "constant", "warmup_steps": 100, "logging_steps": 10, "eval_steps": 500, "save_steps": 5000, "save_total_limit": 3}}, "a_2_lora_instruction_tune": {"run_name_suffix": "a100-t00t1", "max_tokens_length": 2048, "base_on": {"output_of": "a_1_embeddings"}, "use_peft": "lora", "dataset_name": "zh-tw-pythia-ta8000-v1-it1-sg-001", "dataset": {"build_with": ["sharegpt"], "preview_length": 512, "sharegpt_settings": {"source_dataset": "zetavg/ShareGPT-Processed", "train_on_inputs": false, "languages": [{"en": 0.3}, {"zh": 0.2}, "zh_Hant"], "rows_limit": 10000, "test_size": 0.01, "test_split_seed": 42, "test_rows_limit": 100}}, "training_arguments": {"num_train_epochs": 4, "auto_find_batch_size": true, "gradient_accumulation_steps": 1, "optim": "adamw_torch", "learning_rate": 5e-05, "lr_scheduler_type": "constant", "warmup_steps": 100, "logging_steps": 10, "eval_steps": 100, "save_steps": 2000, "save_total_limit": 
5}, "lora_config": {"task_type": "CAUSAL_LM", "r": 256, "lora_alpha": 512, "lora_dropout": 0.05, "target_modules": ["embed", "input", "query_key_value", "dense"]}, "log_output_every_n_steps": 50}}, "push_outputs_to_hf": true, "report_to_wandb": true, "wandb_project": "zh-tw-llm", "wandb_group": "ta01"}
    
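Below is a minimal sketch for loading this adapter for inference, assuming the outputs were pushed to the Hugging Face Hub under the zh-tw-llm-dv organization as the config indicates (push_outputs_to_hf: true). The repository IDs are assumptions based on the names listed in this card, and the prompt format used during instruction tuning is defined by the project's data pipeline, so treat this purely as a raw-generation example.

    # Minimal inference sketch; repository IDs are assumed from the names in this card
    # and may need to be adjusted to the actual Hub paths.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import PeftModel

    tokenizer_repo = "zh-tw-llm-dv/zh-tw-pythia-tokenizer-a8000-v1"  # assumed Hub path
    base_repo = "zh-tw-llm-dv/zh-tw-llm-ta01-pythia-1b-ta8000-v1-a_1_embeddings-a100-t02-3d435e"  # assumed Hub path
    adapter_repo = "zh-tw-llm-dv/zh-tw-llm-ta01-pythia-1b-ta8000-v1-a_2_lora_instruction_tune-a100-t00t1-4d09e9"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_repo)
    model = AutoModelForCausalLM.from_pretrained(base_repo, torch_dtype=torch.float16)

    # The base checkpoint was trained with the extended 58113-token vocabulary; its
    # embedding rows should line up with the tokenizer length reported on this card.
    print(len(tokenizer), model.get_input_embeddings().weight.shape[0])

    model = PeftModel.from_pretrained(model, adapter_repo)  # attach the LoRA adapter
    model.eval()

    # "Please introduce Taiwan in Traditional Chinese." (example prompt only)
    inputs = tokenizer("請用繁體中文介紹台灣。", return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=64)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))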
