File size: 1,455 Bytes
0106d5f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d960853
 
 
0106d5f
bef8e94
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import os
import json
from dotenv import load_dotenv

# Load environment variables from a local .env file. This is best-effort:
# in deployed environments variables are injected directly and no .env exists.
try:
    load_dotenv('.env')
except Exception:
    # A missing/malformed .env must not block module import, but never use a
    # bare `except:` — it would also swallow SystemExit/KeyboardInterrupt.
    pass

# Package layout, resolved relative to the current working directory at
# import time. All runtime assets (prompts, data) live inside the package.
PACKAGE = 'policy_analyser'
PROJECT_DIR = os.getcwd()
# Root of the importable package within the project checkout.
PACKAGE_PATH = os.path.join(PROJECT_DIR, PACKAGE)
# Prompt templates used to build LLM requests.
PROMPTS_DIR = os.path.join(PACKAGE_PATH, 'prompts')
# Static data files (e.g. entity definitions).
DATA_DIR = os.path.join(PACKAGE_PATH, 'data')

# Service credentials, read from the environment (populated by load_dotenv
# above or injected by the deployment). Every value defaults to '' when the
# variable is unset, so consumers can rely on str rather than Optional[str].
CREDENTIALS = {
    'azure' : {
        'plain-text' : {
            'endpoint' : os.environ.get('AZURE_PLAIN_TEXT_ENDPOINT', ''),
            # Fix: default to '' like every other entry — previously this was
            # the only lookup without a default, yielding None when unset.
            'key' : os.environ.get('AZURE_PLAIN_TEXT_KEY', '')
        },
        'layout' : {
            'endpoint' : os.environ.get('AZURE_LAYOUT_ENDPOINT', ''),
            'key' : os.environ.get('AZURE_LAYOUT_KEY', ''),
            'model' : os.environ.get('AZURE_LAYOUT_MODEL', '')
        }
    }
}

# --- Model / service configuration --------------------------------------
# Azure OpenAI deployment used for analysis.
GPT_ENGINE = 'o3-mini'
GPT_KEY = os.environ.get('GPT_KEY', '')
GPT_VERSION = '2024-12-01-preview'
# NOTE(review): the two endpoints below are hard-coded here while CREDENTIALS
# above reads AZURE_LAYOUT_ENDPOINT from the environment — confirm which one
# consumers should treat as authoritative.
GPT_API_BASE = 'https://ai-ackods910341544474.openai.azure.com/'
AZURE_LAYOUT_ENDPOINT = 'https://acko-document-intelligence.cognitiveservices.azure.com/'
AZURE_LAYOUT_KEY = os.environ.get('AZURE_LAYOUT_KEY', '')
# Document Intelligence prebuilt model identifier.
AZURE_LAYOUT_MODEL = 'prebuilt-layout'

# NOTE(review): disabled prompt assembly kept for reference — it reads
# prompt/entity files from disk at import time (presumably deliberately
# disabled to avoid import-time I/O; confirm whether to restore or remove).
# EXTRACTION_PROMPT = open(os.path.join(PROMPTS_DIR, 'extraction.txt')).read()
# entities = json.load(open(os.path.join(DATA_DIR, 'policy_analyser_entities.json')))
# for entity in entities:
    # del entity['entityId']
# entities_str = '\n---\n'.join(['\n'.join([f'{k} : {v}' for k, v in entity.items()]) for entity in entities])
# EXTRACTION_PROMPT += entities_str