mateoluksenberg committed on
Commit
b46d076
·
verified ·
1 Parent(s): 0e13882

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -31
app.py CHANGED
@@ -299,7 +299,7 @@ EXAMPLES = [
299
 
300
 
301
  @spaces.GPU()
302
- def simple_chat(message: dict, file_obj: io.BytesIO = None, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
303
  try:
304
  model = AutoModelForCausalLM.from_pretrained(
305
  MODEL_ID,
@@ -310,18 +310,24 @@ def simple_chat(message: dict, file_obj: io.BytesIO = None, temperature: float =
310
 
311
  conversation = []
312
 
313
- if file_obj:
314
- choice, contents = mode_load(file_obj)
 
315
  if choice == "image":
316
- conversation.append({"role": "user", "image": contents, "content": message["text"]})
317
  elif choice == "doc":
318
- format_msg = contents + "\n\n\n" + "{} files uploaded.\n".format("1") + message["text"]
319
  conversation.append({"role": "user", "content": format_msg})
320
  else:
321
- conversation.append({"role": "user", "content": message["text"]})
 
 
 
 
 
322
 
323
  input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
324
-
325
  generate_kwargs = dict(
326
  max_length=max_length,
327
  do_sample=True,
@@ -341,35 +347,37 @@ def simple_chat(message: dict, file_obj: io.BytesIO = None, temperature: float =
341
  return PlainTextResponse(f"Error: {str(e)}")
342
 
343
 
344
- @app.post("/chat/")
345
- async def test_endpoint(text: str, file_url: str = None):
346
- if not text:
347
- raise HTTPException(status_code=400, detail="Missing 'text' in request body")
348
 
349
- if file_url:
350
- file_obj = download_file(file_url)
351
- if file_obj is None:
352
- raise HTTPException(status_code=400, detail="Failed to download file from URL")
 
 
 
 
 
 
 
 
 
 
 
 
 
353
  else:
354
- file_obj = None
355
-
356
- print("Mensaje: ")
357
- print(text)
358
- print("File: ")
359
- print(file_obj)
 
 
 
360
 
361
- response = simple_chat(text, file_obj)
362
  return response
363
 
364
- def download_file(url: str) -> io.BytesIO:
365
- try:
366
- response = requests.get(url)
367
- response.raise_for_status()
368
- return io.BytesIO(response.content)
369
- except Exception as e:
370
- print(f"Error downloading file: {str(e)}")
371
- return None
372
-
373
 
374
  with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
375
  gr.HTML(TITLE)
 
299
 
300
 
301
  @spaces.GPU()
302
+ def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
303
  try:
304
  model = AutoModelForCausalLM.from_pretrained(
305
  MODEL_ID,
 
310
 
311
  conversation = []
312
 
313
+ if "files" in message and message["files"]:
314
+ file_content = message["files"][-1] # Assuming the last file is the one to be processed
315
+ choice, contents = mode_load(file_content, message["file_name"])
316
  if choice == "image":
317
+ conversation.append({"role": "user", "image": contents, "content": message['text']})
318
  elif choice == "doc":
319
+ format_msg = contents + "\n\n\n" + "{} files uploaded.\n".format(len(message["files"])) + message['text']
320
  conversation.append({"role": "user", "content": format_msg})
321
  else:
322
+ # Handle case where no file is uploaded
323
+ if not history:
324
+ # You might want to handle this case depending on your use case
325
+ # raise gr.Error("Please upload an image first.")
326
+ contents = None
327
+ conversation.append({"role": "user", "content": message['text']})
328
 
329
  input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
330
+
331
  generate_kwargs = dict(
332
  max_length=max_length,
333
  do_sample=True,
 
347
  return PlainTextResponse(f"Error: {str(e)}")
348
 
349
 
 
 
 
 
350
 
351
+ @app.post("/chat/")
352
+ async def test_endpoint(
353
+ text: str = Form(...),
354
+ file: UploadFile = File(None)
355
+ ):
356
+ # Verificar si se ha subido un archivo
357
+ if file:
358
+ # Leer el archivo en memoria
359
+ file_content = BytesIO(await file.read())
360
+ file_name = file.filename
361
+
362
+ # Construir el mensaje con el archivo y el texto
363
+ message = {
364
+ "text": text,
365
+ "file_content": file_content,
366
+ "file_name": file_name
367
+ }
368
  else:
369
+ # Si no se sube archivo, solo se incluye el texto
370
+ message = {
371
+ "text": text,
372
+ "file_content": None,
373
+ "file_name": None
374
+ }
375
+
376
+ # Llamar a la función `simple_chat` con el mensaje
377
+ response = simple_chat(message)
378
 
 
379
  return response
380
 
 
 
 
 
 
 
 
 
 
381
 
382
  with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
383
  gr.HTML(TITLE)