ProCreations committed
Commit c08680c · verified · 1 Parent(s): e18ee0c

Update app.py

Files changed (1)
  1. app.py +4 -1
app.py CHANGED
@@ -1,10 +1,12 @@
 #!/usr/bin/env python3
-# sloppy header
+
+
 
 import os, json, time, random, threading, logging
 from datetime import datetime, timezone
 import torch; torch.set_num_threads(os.cpu_count()); torch.set_num_interop_threads(os.cpu_count())
 import gradio as gr
+
 from transformers import AutoTokenizer, AutoModelForCausalLM
 from gradio.themes import Dark
 
@@ -12,6 +14,7 @@ MODEL_NAME = "meta-llama/Llama-3.1-8B-Instruct"
 PROMPTS_PATH = "full_prompts.json"
 STATE_PATH = "current_state.json"
 DATA_PATH = "data.json"
+
 TOKENS_PER_PROMPT = 2048
 SECS_PER_TOKEN = 15
 TEMP = 0.9; TOP_P = 0.95; MAX_CTX = 8192
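
For context on the torch line carried over unchanged above: it pins both of PyTorch's CPU thread pools to the host core count before the model loads. A minimal standalone sketch of that setup (not part of this commit; the fallback to 1 is an added safeguard, since os.cpu_count() can return None):

#!/usr/bin/env python3
# Sketch of the thread configuration app.py applies at import time.
import os
import torch

cpus = os.cpu_count() or 1           # os.cpu_count() may return None; fall back to 1
torch.set_num_threads(cpus)          # intra-op parallelism: threads used inside one op
torch.set_num_interop_threads(cpus)  # inter-op parallelism: threads across independent ops
print(torch.get_num_threads(), torch.get_num_interop_threads())

Note that torch.set_num_interop_threads() must run before any inter-op parallel work starts (it raises a RuntimeError otherwise), which is presumably why app.py issues both calls immediately after importing torch.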