from typing import List

# this try/except is important for publishing to Hugging Face
try:
    from dagster import Config
except ImportError:
    Config = object


class LLMBoardConfig(Config):
    # Column groupings and output locations used when assembling the board tables and plots.
    group_columns: List[str] = ["model", "language", "template_name"]
    single_values_columns: List[str] = ["execution_time", "characters_count", "words_count"]
    list_columns: List[str] = ["chunk_sizes", "chunk_generation_times", "chunk_generation_times_by_chunk_sizes"]
    plot_dir: str = "./html/plots/"
    plot_json_dir: str = "./data/"
    saving_path: str = "data/"


class QueriesConfig(Config):
    # Prompt templates used to build the summarization queries.
    base_query_template: str = """Summarize me this text, the summary should be in {language}
```
{text}
```
"""

    query_template: dict = {
        "markdown": """Return output as markdown""",
        "json": """Return output as json in format:
{
    "summary": "<summary>"
}""",
        "call": """Return output by calling summary_result()""",
    }


class OpenAIConfig(Config):
    mock: bool = False
    remove_old_measurements: bool = False


class QueriesDatasetConfig(Config):
    # Dataset and sampling settings used when generating summarization queries.
    dataset_name: str = "GEM/xlsum"
    samples_per_measurement: int = 20
    languages: List[str] = ["english", "japanese"]
    query_config: QueriesConfig = QueriesConfig()


class SummaryConfig(Config):
    saving_path: str = "data/"


class ModelCostsConfig(Config):
    saving_path: str = "data/"


class TimeOfDayComparisonConfig(Config):
    saving_path: str = "data/"


class GeneralPlotConfig(Config):
    plots_dir: str = "./html/plots/"
    saving_path: str = "data/"
    seconds_per_token: float = 184 / 6
    input_size: int = 100
    expected_output_size: int = 50
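

# Illustrative sketch, not part of the original file: one way the templates in
# QueriesConfig could be combined into a full prompt. The helper name
# `build_prompt` and the `output_format` parameter are assumptions made for
# illustration only; the actual pipeline may assemble prompts differently.
def build_prompt(config: QueriesConfig, text: str, language: str, output_format: str = "markdown") -> str:
    """Fill the base template with the text and language, then append the per-format instruction."""
    prompt = config.base_query_template.format(language=language, text=text)
    return prompt + config.query_template[output_format]


if __name__ == "__main__":
    # Smoke test of the hypothetical helper using the defaults defined above.
    print(build_prompt(QueriesConfig(), text="Example article body.", language="english", output_format="json"))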