Aiswarya Sankar committed on
Commit
5a5e54f
·
1 Parent(s): a1846a5

Update openAI key and documentation

Browse files
Files changed (1) hide show
  1. app.py +45 -34
app.py CHANGED
@@ -18,7 +18,7 @@ import random
18
  import time
19
  import together
20
 
21
- os.environ['OPENAI_API_KEY']='sk-REDACTED-OLD-KEY'
22
  os.environ['ACTIVELOOP_TOKEN']='REDACTED-ACTIVELOOP-TOKEN'
23
 
24
 
@@ -218,7 +218,7 @@ def answer_questions(question: str, github: str, **kwargs) -> Response:
218
  github = repoName[:-4]
219
  print(github)
220
  try:
221
- embeddings = OpenAIEmbeddings(openai_api_key="sk-REDACTED-OLD-KEY")
222
  pathName = github.split('/')[-1]
223
  dataset_path = "hub://aiswaryas/" + pathName
224
 
@@ -234,14 +234,14 @@ def answer_questions(question: str, github: str, **kwargs) -> Response:
234
  q = SimpleQueue()
235
 
236
  model = ChatOpenAI(
237
- model_name='gpt-4',
238
  temperature=0.0,
239
  verbose=True,
240
  streaming=True, # Pass `streaming=True` to make sure the client receives the data.
241
  callback_manager=CallbackManager(
242
  [StreamingGradioCallbackHandler(q)]
243
  ),
244
- openai_api_key="sk-REDACTED-OLD-KEY",
245
  )
246
  qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
247
  chat_history = []
@@ -342,7 +342,7 @@ def generateDocumentationPerFolder(dir, github):
342
 
343
  print(prompt)
344
  try:
345
- embeddings = OpenAIEmbeddings(openai_api_key="sk-REDACTED-OLD-KEY")
346
  pathName = github.split('/')[-1]
347
  print("PATH NAME: " + str(pathName))
348
  dataset_path = "hub://aiswaryas/" + pathName
@@ -358,11 +358,11 @@ def generateDocumentationPerFolder(dir, github):
358
 
359
  # streaming_handler = kwargs.get('streaming_handler')
360
  model = ChatOpenAI(
361
- model_name='gpt-4',
362
  temperature=0.0,
363
  verbose=True,
364
  streaming=True, # Pass `streaming=True` to make sure the client receives the data.
365
- openai_api_key="sk-REDACTED-OLD-KEY",
366
  )
367
  qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
368
  chat_history = []
@@ -405,7 +405,7 @@ def solveGithubIssue(ticket, history) -> Response:
405
  print(question)
406
 
407
  try:
408
- embeddings = OpenAIEmbeddings(openai_api_key="sk-REDACTED-OLD-KEY")
409
  pathName = github.split('/')[-1]
410
  dataset_path = "hub://aiswaryas/" + pathName
411
 
@@ -420,14 +420,14 @@ def solveGithubIssue(ticket, history) -> Response:
420
 
421
  q = SimpleQueue()
422
  model = ChatOpenAI(
423
- model_name='gpt-4',
424
  temperature=0.0,
425
  verbose=True,
426
  streaming=True, # Pass `streaming=True` to make sure the client receives the data.
427
  callback_manager=CallbackManager(
428
  [StreamingGradioCallbackHandler(q)]
429
  ),
430
- openai_api_key="sk-REDACTED-OLD-KEY",
431
  )
432
  qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever,max_tokens_limit=8000)
433
 
@@ -455,7 +455,7 @@ def bot(history, **kwargs):
455
  print("Repo name in the bot: " + str(repoName))
456
  github = repoName[:-4]
457
  try:
458
- embeddings = OpenAIEmbeddings(openai_api_key="sk-REDACTED-OLD-KEY")
459
  pathName = github.split('/')[-1]
460
  dataset_path = "hub://aiswaryas/" + pathName
461
 
@@ -470,14 +470,14 @@ def bot(history, **kwargs):
470
 
471
  q = SimpleQueue()
472
  model = ChatOpenAI(
473
- model_name='gpt-4',
474
  temperature=0.0,
475
  verbose=True,
476
  streaming=True, # Pass `streaming=True` to make sure the client receives the data.
477
  callback_manager=CallbackManager(
478
  [StreamingGradioCallbackHandler(q)]
479
  ),
480
- openai_api_key="sk-REDACTED-OLD-KEY",
481
  )
482
  qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
483
  chat_history = []
@@ -585,28 +585,39 @@ with gr.Blocks() as demo:
585
  """)
586
 
587
  # docs = generateDocumentationPerFolder("overview", repo_name)
588
- markdown = gr.Markdown()
589
 
590
- def button_click_callback(markdown):
591
- print("IN BUTTON CLICK CALLBACK")
592
- docs = generateDocumentationPerFolder("overview", repoName[:-4])
593
- markdown.update(docs)
594
-
595
- # Generate the left column buttons and their names and wrap each one in a function
596
- with gr.Row():
597
- with gr.Column(scale=.5, min_width=300):
598
- dirNames = generateFolderNamesForRepo(repoName[:-4])
599
- print(dirNames)
600
- buttons = [gr.Button(folder_name, onclick=button_click_callback) for folder_name in dirNames]
601
- for btn, folder_name in zip(buttons, dirNames):
602
- btn.click(button_click_callback, [markdown], [markdown] )
603
-
604
-
605
- # Generate the overall documentation for the main bubble at the same time
606
- print("REPO NAME IN DOCS: " + str(repoName[:-4]))
607
- with gr.Column(scale=2, min_width=300):
608
- docs = generateDocumentationPerFolder("overview", repoName[:-4])
609
- markdown.update(docs)
 
 
 
 
 
 
 
 
 
 
 
 
610
 
611
  # For each folder, generate a diagram and 2-3 prompts that dive deeper into explaining content
612
 
 
18
  import time
19
  import together
20
 
21
+ os.environ['OPENAI_API_KEY']='sk-REDACTED-NEW-KEY'
22
  os.environ['ACTIVELOOP_TOKEN']='REDACTED-ACTIVELOOP-TOKEN'
23
 
24
 
 
218
  github = repoName[:-4]
219
  print(github)
220
  try:
221
+ embeddings = OpenAIEmbeddings(openai_api_key="sk-REDACTED-NEW-KEY")
222
  pathName = github.split('/')[-1]
223
  dataset_path = "hub://aiswaryas/" + pathName
224
 
 
234
  q = SimpleQueue()
235
 
236
  model = ChatOpenAI(
237
+ model_name='gpt-3.5-turbo-16k',
238
  temperature=0.0,
239
  verbose=True,
240
  streaming=True, # Pass `streaming=True` to make sure the client receives the data.
241
  callback_manager=CallbackManager(
242
  [StreamingGradioCallbackHandler(q)]
243
  ),
244
+ openai_api_key="sk-REDACTED-NEW-KEY",
245
  )
246
  qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
247
  chat_history = []
 
342
 
343
  print(prompt)
344
  try:
345
+ embeddings = OpenAIEmbeddings(openai_api_key="sk-REDACTED-NEW-KEY")
346
  pathName = github.split('/')[-1]
347
  print("PATH NAME: " + str(pathName))
348
  dataset_path = "hub://aiswaryas/" + pathName
 
358
 
359
  # streaming_handler = kwargs.get('streaming_handler')
360
  model = ChatOpenAI(
361
+ model_name='gpt-3.5-turbo-16k',
362
  temperature=0.0,
363
  verbose=True,
364
  streaming=True, # Pass `streaming=True` to make sure the client receives the data.
365
+ openai_api_key="sk-REDACTED-NEW-KEY",
366
  )
367
  qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
368
  chat_history = []
 
405
  print(question)
406
 
407
  try:
408
+ embeddings = OpenAIEmbeddings(openai_api_key="sk-REDACTED-NEW-KEY")
409
  pathName = github.split('/')[-1]
410
  dataset_path = "hub://aiswaryas/" + pathName
411
 
 
420
 
421
  q = SimpleQueue()
422
  model = ChatOpenAI(
423
+ model_name='gpt-3.5-turbo-16k',
424
  temperature=0.0,
425
  verbose=True,
426
  streaming=True, # Pass `streaming=True` to make sure the client receives the data.
427
  callback_manager=CallbackManager(
428
  [StreamingGradioCallbackHandler(q)]
429
  ),
430
+ openai_api_key="sk-REDACTED-NEW-KEY",
431
  )
432
  qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever,max_tokens_limit=8000)
433
 
 
455
  print("Repo name in the bot: " + str(repoName))
456
  github = repoName[:-4]
457
  try:
458
+ embeddings = OpenAIEmbeddings(openai_api_key="sk-REDACTED-NEW-KEY")
459
  pathName = github.split('/')[-1]
460
  dataset_path = "hub://aiswaryas/" + pathName
461
 
 
470
 
471
  q = SimpleQueue()
472
  model = ChatOpenAI(
473
+ model_name='gpt-3.5-turbo-16k',
474
  temperature=0.0,
475
  verbose=True,
476
  streaming=True, # Pass `streaming=True` to make sure the client receives the data.
477
  callback_manager=CallbackManager(
478
  [StreamingGradioCallbackHandler(q)]
479
  ),
480
+ openai_api_key="sk-REDACTED-NEW-KEY",
481
  )
482
  qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
483
  chat_history = []
 
585
  """)
586
 
587
  # docs = generateDocumentationPerFolder("overview", repo_name)
 
588
 
589
+ # For now let's just display all of the docs in one big file
590
+ allDocs = ""
591
+ dirNames = generateFolderNamesForRepo(repoName[:-4])
592
+ for dir in dirNames:
593
+ if dir[0] != ".":
594
+ allDocs += generateDocumentationPerFolder(dir, repoName[:-4]) + '\n\n'
595
+
596
+ gr.Markdown(allDocs)
597
+
598
+
599
+ # def button_click_callback(markdown):
600
+ # print("IN BUTTON CLICK CALLBACK")
601
+ # docs = generateDocumentationPerFolder("overview", repoName[:-4])
602
+ # markdown.update(docs)
603
+
604
+ # markdown = gr.Markdown()
605
+ # # Generate the left column buttons and their names and wrap each one in a function
606
+ # with gr.Row():
607
+ # with gr.Column(scale=.5, min_width=300):
608
+ # dirNames = generateFolderNamesForRepo(repoName[:-4])
609
+ # print(dirNames)
610
+ # buttons = [gr.Button(folder_name) for folder_name in dirNames]
611
+ # for btn, folder_name in zip(buttons, dirNames):
612
+ # btn.click(button_click_callback, [markdown], [markdown] )
613
+
614
+
615
+ # # Generate the overall documentation for the main bubble at the same time
616
+ # print("REPO NAME IN DOCS: " + str(repoName[:-4]))
617
+ # with gr.Column(scale=2, min_width=300):
618
+ # docs = generateDocumentationPerFolder("overview", repoName[:-4])
619
+ # markdown.update(docs)
620
+ # markdown.render()
621
 
622
  # For each folder, generate a diagram and 2-3 prompts that dive deeper into explaining content
623