Docfile committed on
Commit 031e364 · verified · 1 parent: 2a1073e

Update app.py

Files changed (1)
  app.py  +45 -71
app.py CHANGED
@@ -1,94 +1,68 @@
  import gradio as gr
- from PIL import Image
  import google.generativeai as genai
- import time
- import pathlib
- # Configure the API key directly in the script
- API_KEY = 'AIzaSyDnnYRJ49VUm_2FiKhNubv85g6KCDjcNSc'
- genai.configure(api_key=API_KEY)

- # Generation configuration
  generation_config = {
-     "temperature": 1,
-     "top_p": 0.95,
-     "top_k": 64,
-     "max_output_tokens": 8192,
-     "response_mime_type": "text/plain",
  }

- # Safety settings
  safety_settings = [
-     {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
-     {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
-     {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
-     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
  ]

- # Model name
- MODEL_NAME = "gemini-1.5-pro-latest"

- # Create the model
- model = genai.GenerativeModel(
-     model_name=MODEL_NAME,
-     safety_settings=safety_settings,
-     generation_config=generation_config,
- )

- e = ""
- # Function to generate the content
- async def generate_content(pro, image):
-     global e

-     if not image:
-         response = model.generate_content(pro)
-         print(response)
-         e = response.text
-         print(e)

-     else:
-         '''
-         print(f"Uploading file...")
-         uploaded_video = genai.upload_file(path=image)
-         print(f"Completed upload: {uploaded_video.uri}")

-         while uploaded_video.state.name == "PROCESSING":
-             print("Waiting for video to be processed.")
-             time.sleep(2)
-             uploaded_video = genai.get_file(uploaded_video.name)

-         if uploaded_video.state.name == "FAILED":
-             raise ValueError(uploaded_video.state.name)

-         print(f"Video processing complete: " + uploaded_video.uri)

-         print("Making LLM inference request...") '''

-         image_input = {
-             'mime_type': 'image/jpeg',
-             'data': pathlib.Path(image).read_bytes()
-         }
-         response = model.generate_content(
-             [prompt, image_input], request_options={"timeout": 600}
-         )

-         # genai.delete_file(uploaded_video.name)
-         # print(f"Deleted file {uploaded_video.uri}")

-         e = response
      return e

- markdown = r"""
- e
- """.format(e)
- # Gradio interface
- iface = gr.Interface(fn=generate_content, inputs=[gr.Textbox(), gr.Image(type='pil')], outputs=gr.Markdown(markdown, latex_delimiters=[{"left": "$$", "right": "$$", "display": True}]))

  iface.launch()
 
+
  import gradio as gr
  import google.generativeai as genai

+ import os
+ token = os.environ.get("TOKEN")
+ e = ""
+ genai.configure(
+     api_key=token
+ )
  generation_config = {
+     "temperature": 1,
+     "top_p": 0.95,
+     "top_k": 64,
+     "max_output_tokens": 8192,
  }

  safety_settings = [
+     {
+         "category": "HARM_CATEGORY_HARASSMENT",
+         "threshold": "BLOCK_NONE"
+     },
+     {
+         "category": "HARM_CATEGORY_HATE_SPEECH",
+         "threshold": "BLOCK_NONE"
+     },
+     {
+         "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+         "threshold": "BLOCK_NONE"
+     },
+     {
+         "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+         "threshold": "BLOCK_NONE"
+     },
  ]

+ mm = """ resous cet exercice. réponse attendue uniquement en Latex
+ """
+ model = genai.GenerativeModel(model_name="gemini-1.5-pro",
+                               generation_config=generation_config, system_instruction=mm,
+                               safety_settings=safety_settings)

+ # Function to generate the content
+ def generate_content(image):
+     global e
+
+     if not image:
+         e = " djo"
+
+     else:
+         response = model.generate_content([image])
+         print(response.text)
+         e = response.text

      return e

+
+ iface = gr.Interface(fn=generate_content, inputs=gr.Image(type='pil'), outputs=gr.Textbox())

  iface.launch()
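
The main change in this commit is that app.py now reads its Gemini API key from a TOKEN environment variable instead of hard-coding it, and the Gradio interface now takes only an image. Below is a minimal sketch of the same flow outside Gradio, useful for a local check; it assumes TOKEN is set in the environment and that exercise.jpg is a local image of the exercise to solve (the file name, and calling the model without the app's French system instruction, are illustrative assumptions, not part of the commit).

import os

import google.generativeai as genai
from PIL import Image

# Same pattern as the updated app.py: the key comes from the environment,
# never from the source file.
genai.configure(api_key=os.environ["TOKEN"])

# "gemini-1.5-pro" matches the model name used in the new app.py.
model = genai.GenerativeModel(model_name="gemini-1.5-pro")

# exercise.jpg is a placeholder path; gr.Image(type='pil') hands the app
# a PIL image in the same way.
image = Image.open("exercise.jpg")

response = model.generate_content([image])
print(response.text)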