Update app.py
app.py CHANGED
@@ -26,6 +26,7 @@ import pprint
 import json
 from huggingface_hub import HfApi, login, upload_folder, create_repo
 import os
+import requests
 
 # Load configuration file
 with open('config.json', 'r') as config_file:
@@ -65,7 +66,7 @@ if (should_train_model=='1'): #train model
     #settings
     model_save_path = path_to_save_trained_model_to
     bias_non_fleet = 1.0
-    epochs_to_run = .
+    epochs_to_run = .01
 
     file_path_train = train_file + ".csv"
     file_path_test = test_file + ".csv"
@@ -316,14 +317,22 @@ if (should_train_model=='1'): #train model
         commit_message="Push tokenizer",
         #overwrite=True # Force overwrite existing files
     )
 
-
-
-
-
-
-
-
+    url = "http://210.1.253.35:200/api/hello"  # Example API
+    response = requests.get(url)
+
+    if response.status_code == 200:
+        data = response.json()  # Convert response to JSON
+        print(data)
+    else:
+        print(f"Error: {response.status_code}")
+else:
+    print('Load Pre-trained')
+    model_save_path = f"./{model_save_path}_model"
+    tokenizer_save_path = f"./{model_save_path}_tokenizer"
+    # RobertaTokenizer.from_pretrained(model_save_path)
+    model = AutoModelForSequenceClassification.from_pretrained(model_save_path).to('cpu')
+    tokenizer = AutoTokenizer.from_pretrained(tokenizer_save_path)
 
 #Define the label mappings (this must match the mapping used during training)
 label_mapping = model.config.label_mapping
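For reference, below is a minimal standalone sketch of the logic this commit introduces, assuming the surrounding app.py context (the config-driven should_train_model flag, the configured save path, and a label_mapping attribute stored on the model config). The placeholder values, the request timeout, and the exception handling are illustrative additions, not part of the commit. Note that the commit reassigns model_save_path before building tokenizer_save_path from it, so the tokenizer directory inherits the "_model" suffix; the sketch derives both directories from the original value instead.

# Sketch of the new branch logic; names marked as placeholders are assumptions.
import requests
from transformers import AutoModelForSequenceClassification, AutoTokenizer

should_train_model = '0'                    # in app.py this comes from config.json
path_to_save_trained_model_to = "my_model"  # placeholder for the configured save path
model_save_path = path_to_save_trained_model_to

if should_train_model == '1':
    # ... training and the huggingface_hub uploads happen here in app.py ...
    url = "http://210.1.253.35:200/api/hello"  # example API endpoint from the commit
    try:
        response = requests.get(url, timeout=10)  # timeout/exception handling are illustrative
        if response.status_code == 200:
            print(response.json())  # parse and print the JSON payload
        else:
            print(f"Error: {response.status_code}")
    except requests.RequestException as exc:
        print(f"Request failed: {exc}")
else:
    # Load the previously saved model and tokenizer instead of training.
    print('Load Pre-trained')
    model_dir = f"./{model_save_path}_model"
    tokenizer_dir = f"./{model_save_path}_tokenizer"  # built from the original value
    model = AutoModelForSequenceClassification.from_pretrained(model_dir).to('cpu')
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)

# Label mapping saved on the model config during training (as in app.py);
# here it relies on the else branch above having defined `model`.
label_mapping = model.config.label_mapping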