Update human_text_detect.py
human_text_detect.py  CHANGED  (+6 -6)
@@ -102,17 +102,17 @@ def detect_human_text(model_name, topic, text):
     # Init model
     print('Init tokenizer')
     lm_name = 'gpt2-xl' if model_name == 'GPT2XL' else 'microsoft/phi-2'
-    tokenizer = AutoTokenizer.from_pretrained(lm_name, cache_dir=cache_dir_tokenizer)
+    tokenizer = AutoTokenizer.from_pretrained(cache_dir_tokenizer) #lm_name, cache_dir=cache_dir_tokenizer
 
-    print("Save tokenizer")
-    tokenizer.save_pretrained(cache_dir_tokenizer)
+    # print("Save tokenizer")
+    # tokenizer.save_pretrained(cache_dir_tokenizer)
 
     print('Init model')
     cache_dir_model = f"/data/cacheHuggingface/{model_name}/model"
-    model = AutoModelForCausalLM.from_pretrained(lm_name, cache_dir=cache_dir_model)
+    model = AutoModelForCausalLM.from_pretrained(cache_dir_model) #lm_name, cache_dir=cache_dir_model
 
-    print("Save model")
-    model.save_pretrained(cache_dir_model)
+    # print("Save model")
+    # model.save_pretrained(cache_dir_model)
 
     print('Init PerplexityEvaluator')
     sentence_detector = PerplexityEvaluator(model, tokenizer)
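The updated calls load the tokenizer and model directly from local directories instead of downloading from the Hugging Face Hub (and re-saving) on every call to detect_human_text(), so those directories must be populated in advance. Below is a minimal one-off sketch of how that could be done. It assumes the tokenizer directory follows the same pattern as cache_dir_model shown in the hunk (/data/cacheHuggingface/{model_name}/tokenizer); the actual definition of cache_dir_tokenizer is not visible in this diff, so adjust the path to match the script.

    # One-off setup: populate the local directories that the updated
    # from_pretrained(...) calls in detect_human_text() now read from.
    from transformers import AutoTokenizer, AutoModelForCausalLM

    model_name = 'GPT2XL'  # any other value selects microsoft/phi-2
    lm_name = 'gpt2-xl' if model_name == 'GPT2XL' else 'microsoft/phi-2'

    cache_dir_tokenizer = f"/data/cacheHuggingface/{model_name}/tokenizer"  # assumed path, not shown in the hunk
    cache_dir_model = f"/data/cacheHuggingface/{model_name}/model"

    # Download from the Hugging Face Hub once while online...
    tokenizer = AutoTokenizer.from_pretrained(lm_name)
    model = AutoModelForCausalLM.from_pretrained(lm_name)

    # ...then save locally so detect_human_text() can load without network access.
    tokenizer.save_pretrained(cache_dir_tokenizer)
    model.save_pretrained(cache_dir_model)

With the directories filled once, the commented-out save_pretrained() calls in the diff are no longer needed at request time, and loading from a fixed local path keeps each detection run offline and deterministic.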