ffreemt committed
Commit dec88ff · 1 Parent(s): 297f7a4

Update: lsb_release -a, bitsandbytes device_map={'': 0}, rich; check nvidia-smi with subprocess.run

Files changed (1):
  1. app.py +7 -7
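The commit title mentions running lsb_release -a and checking nvidia-smi with subprocess.run. The corresponding code is not part of the hunk shown below (only the run_cmd helper's signature appears there), so the following is a minimal sketch of that pattern under stated assumptions: the run_cmd body and the has_nvidia_smi helper are hypothetical, not taken from app.py.

import subprocess

def run_cmd(cmd: str) -> str:
    """Run a shell command and return its combined output (assumed body; app.py's real implementation is not shown in this hunk)."""
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True, check=False)
    return proc.stdout + proc.stderr

def has_nvidia_smi() -> bool:
    """Return True if nvidia-smi exits successfully, i.e. a GPU driver is present (hypothetical helper)."""
    try:
        proc = subprocess.run(["nvidia-smi"], capture_output=True, text=True, check=False)
    except FileNotFoundError:
        # nvidia-smi is not installed at all
        return False
    return proc.returncode == 0

print(run_cmd("lsb_release -a"))  # OS info on Debian/Ubuntu-based images
print("nvidia-smi available:", has_nvidia_smi())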
app.py CHANGED
@@ -52,6 +52,13 @@ from transformers.generation import GenerationConfig
 
 from example_list import css, example_list
 
+os.environ["TZ"] = "Asia/Shanghai"
+try:
+    time.tzset()  # type: ignore # pylint: disable=no-member
+except Exception:
+    # Windows
+    logger.warning("Windows, cant run time.tzset()")
+
 
 def run_cmd(cmd):
     """Execute cmd."""
@@ -87,13 +94,6 @@ if True:
     if not torch.cuda.is_available():
         raise gr.Error("torch.cuda.is_available() is False, cant continue...")
 
-    os.environ["TZ"] = "Asia/Shanghai"
-    try:
-        time.tzset()  # type: ignore # pylint: disable=no-member
-    except Exception:
-        # Windows
-        logger.warning("Windows, cant run time.tzset()")
-
     model_name = "tangger/Qwen-7B-Chat"  # try
     model_name = "Qwen/Qwen-7B-Chat"  # gone!
     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
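The commit title also references bitsandbytes with device_map={'': 0}. The model-loading call itself is outside the hunks above, so this is only a minimal sketch, assuming a 4-bit BitsAndBytesConfig load pinned to GPU 0; the exact quantization kwargs are assumptions, not taken from app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "tangger/Qwen-7B-Chat"

# Assumed quantization settings; app.py's actual kwargs are not shown in this commit.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map={"": 0},  # place the whole model on the first CUDA device
    trust_remote_code=True,
)

device_map={"": 0} maps the entire (quantized) model onto CUDA device 0 instead of letting accelerate shard it across devices or spill to CPU, which matches the torch.cuda.is_available() guard in the diff above.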