Commit 81f0c10
Parent(s): 0bb64a4

test
README.md
CHANGED
@@ -8,7 +8,9 @@ sdk_version: 5.8.0
 app_file: app.py
 hf_oauth: true
 hf_oauth_scopes:
-  - read-repos
+  - read-repos
+  - write-repos
+  - manage-repos
 pinned: false
 license: apache-2.0
 ---
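This scope change is what enables the rest of the commit: with only read-repos, the Space can read the signed-in user's repos but cannot push benchmark results to them; write-repos and manage-repos let it write to and create repos on the user's behalf. As a minimal sketch (illustrative names, not code from this repo): once a gr.LoginButton is present, Gradio injects a gr.OAuthToken into any event handler that declares one, and huggingface_hub can then act with that token.

from typing import Optional

import gradio as gr
from huggingface_hub import whoami

def who(oauth_token: Optional[gr.OAuthToken]):
    # Gradio fills this parameter automatically after OAuth login;
    # it is None when the user has not logged in.
    if oauth_token is None:
        raise gr.Error("Please log in first.")
    return whoami(oauth_token.token)["name"]

with gr.Blocks() as demo:
    gr.LoginButton()  # renders the Hugging Face OAuth login flow
    out = gr.Textbox(label="User")
    gr.Button("Who am I?").click(who, inputs=None, outputs=out)

demo.launch()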
app.py
CHANGED
@@ -41,8 +41,6 @@ DEVICE = "cpu"
 LAUNCHER = "process"
 SCENARIO = "inference"
 BACKENDS = ["pytorch", "openvino"]
-BENCHMARKS_HF_TOKEN = os.getenv("BENCHMARKS_HF_TOKEN")
-BENCHMARKS_REPO_ID = "optimum-benchmark/OpenVINO-Benchmarks"
 TASKS = set(TASKS_TO_OVMODELS.keys() | TASKS_TO_OVPIPELINES) & set(
     TASKS_TO_AUTO_MODEL_CLASS_NAMES.keys() | TASKS_TO_AUTO_PIPELINE_CLASS_NAMES.keys()
 )
@@ -66,7 +64,12 @@ def parse_configs(inputs):
 
     for key in configs.keys():
         for k, v in configs[key].items():
-            if k in ["input_shapes", "generate_kwargs", "call_kwargs", "numactl_kwargs"]:
+            if k in [
+                "input_shapes",
+                "generate_kwargs",
+                "call_kwargs",
+                "numactl_kwargs",
+            ]:
                 configs[key][k] = eval(v)
 
     configs["process"] = ProcessConfig(**configs.pop("process"))
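Only whitelisted keys go through eval, which turns the textbox strings into Python objects. Worth noting: eval on user-supplied strings is risky; a standard, stricter alternative is ast.literal_eval, which only accepts Python literals. A small runnable sketch of the same parsing step (the helper name is hypothetical, not from this app):

import ast

ALLOWED_KEYS = ["input_shapes", "generate_kwargs", "call_kwargs", "numactl_kwargs"]

def parse_value(k: str, v: str):
    # Only whitelisted keys are parsed; everything else stays a string.
    if k in ALLOWED_KEYS:
        return ast.literal_eval(v)  # rejects anything that is not a literal
    return v

print(parse_value("input_shapes", "{'batch_size': 1, 'sequence_length': 16}"))
# -> {'batch_size': 1, 'sequence_length': 16}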
@@ -93,10 +96,11 @@ def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken]):
         raise gr.Error("Please login to be able to run the benchmark.")
 
     timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
-    …
-    …
+    user_name = whoami(oauth_token.token)["name"]
+    repo_id = f"{user_name}/benchmarks"
+    folder = f"{timestamp}"
 
-    gr.Info(f"📩 Benchmark will be saved under {…
+    gr.Info(f"📩 Benchmark will be saved under {repo_id} in the folder {folder}")
 
     outputs = {backend: "Running..." for backend in BACKENDS}
     configs = parse_configs(inputs)
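Instead of the Space-wide BENCHMARKS_REPO_ID, results now go to a benchmarks repo under the user's own namespace, resolved from the OAuth token via whoami. One thing the diff does not show is repo creation, which is what the manage-repos scope permits; a hedged sketch of how that could be prepared with huggingface_hub (the helper and the dataset repo_type are assumptions, not code from this app):

from huggingface_hub import create_repo, whoami

def ensure_benchmarks_repo(token: str) -> str:
    # Resolve the username behind the OAuth token, then make sure
    # <user>/benchmarks exists; exist_ok=True keeps this idempotent.
    user_name = whoami(token)["name"]
    repo_id = f"{user_name}/benchmarks"
    create_repo(repo_id, repo_type="dataset", token=token, exist_ok=True)
    return repo_id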
@@ -114,14 +118,14 @@ def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken]):
             benchmark_report = Benchmark.launch(benchmark_config)
 
             benchmark_config.push_to_hub(
-                repo_id=BENCHMARKS_REPO_ID,
+                repo_id=repo_id,
                 subfolder=benchmark_name,
-                token=BENCHMARKS_HF_TOKEN,
+                token=oauth_token.token,
             )
             benchmark_report.push_to_hub(
-                repo_id=BENCHMARKS_REPO_ID,
+                repo_id=repo_id,
                 subfolder=benchmark_name,
-                token=BENCHMARKS_HF_TOKEN,
+                token=oauth_token.token,
             )
 
         except GatedRepoError:
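Both pushes are now authorized by the caller's OAuth token rather than a shared Space secret, which is exactly what the write-repos scope added in README.md allows. For context, a push like this ultimately reduces to an authenticated file upload; a minimal sketch with plain huggingface_hub (payload, paths, and the dataset repo_type are illustrative assumptions):

from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # in the app this would be oauth_token.token

api.upload_file(
    path_or_fileobj=b'{"latency_s": 1.23}',  # illustrative payload
    path_in_repo="2024-01-01-12-00-00/openvino/benchmark_report.json",
    repo_id="someuser/benchmarks",
    repo_type="dataset",  # assumption: the benchmarks repo is a dataset
)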