crossprism committed • Commit 3da3793 • Parent(s): 66b34fc

initial commit

Files changed:
- .gitignore +3 -0
- app.py +29 -0
- helper.py +128 -0
- requirements.txt +2 -0
- test1.jpg +0 -0
- test2.jpg +0 -0
- test3.jpg +0 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
+.DS_Store
+__pycache__
+*~
app.py
ADDED
@@ -0,0 +1,29 @@
+import gradio as gr
+import os
+import platform
+from helper import CoreMLPipeline
+
+force_tf = os.environ.get('FORCE_TF', False)
+auth_key = os.environ.get('HF_TOKEN', True)
+
+config = { "coreml_extractor_repoid":"crossprism/efficientnetv2-21k-fv-m",
+           "coreml_extractor_path":"efficientnetV2M21kExtractor.mlmodel",
+           "tf_extractor_repoid":"crossprism/efficientnetv2-21k-fv-m-tf",
+           "tf_extractor_path":"efficientnetv2-21k-fv-m",
+           "coreml_classifier_repoid":"crossprism/travel_australia_antarctica_landmarks",
+           "coreml_classifier_path":"LandmarksAustraliaAntarcticHead_quant8.mlpackage/Data/com.apple.CoreML/efficientnetV2M21kOceaniaLandmarksHead_quant8.mlmodel",
+           "activation":"softmax"
+         }
+use_tf = force_tf or (platform.system() != 'Darwin')
+
+helper = CoreMLPipeline(config, auth_key, use_tf)
+
+def classify_image(image):
+    resized = image.resize((480,480))
+    return helper.classify(resized)
+
+image = gr.Image(type='pil')
+label = gr.Label(num_top_classes=3)
+
+gr.Interface(fn=classify_image, inputs=image, outputs=label, examples = [["test1.jpg"],["test2.jpg"],["test3.jpg"]]).launch()
+
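For reference, a minimal sketch of exercising the same pipeline without the Gradio UI. This is not part of the commit: it reuses the config dict from app.py above and assumes HF_TOKEN is set in the environment and test1.jpg sits next to the scripts.

# Hypothetical smoke test (not part of this commit): drives CoreMLPipeline directly.
import os
from PIL import Image
from helper import CoreMLPipeline

config = {
    "coreml_extractor_repoid": "crossprism/efficientnetv2-21k-fv-m",
    "coreml_extractor_path": "efficientnetV2M21kExtractor.mlmodel",
    "tf_extractor_repoid": "crossprism/efficientnetv2-21k-fv-m-tf",
    "tf_extractor_path": "efficientnetv2-21k-fv-m",
    "coreml_classifier_repoid": "crossprism/travel_australia_antarctica_landmarks",
    "coreml_classifier_path": "LandmarksAustraliaAntarcticHead_quant8.mlpackage/Data/com.apple.CoreML/efficientnetV2M21kOceaniaLandmarksHead_quant8.mlmodel",
    "activation": "softmax",
}

pipeline = CoreMLPipeline(config, os.environ.get("HF_TOKEN", True), use_tf=True)
probs = pipeline.classify(Image.open("test1.jpg").resize((480, 480)))  # same 480x480 resize app.py uses
print(sorted(probs.items(), key=lambda kv: kv[1], reverse=True)[:3])   # top-3 (label, score) pairs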
helper.py
ADDED
@@ -0,0 +1,128 @@
+import tensorflow as tf
+import coremltools as ct
+import numpy as np
+import PIL
+from huggingface_hub import hf_hub_download
+from huggingface_hub import snapshot_download
+import os
+import math
+
+# Helper class to extract features from one model, and then feed those features into a classification head
+# Because coremltools will only perform inference on OSX, an alternative tensorflow inference pipeline uses
+# a tensorflow feature extractor and feeds the features into a dynamically created keras model based on the coreml classification head.
+class CoreMLPipeline:
+    def __init__(self, config, auth_key, use_tf):
+        self.config = config
+        self.use_tf = use_tf
+        if use_tf:
+            extractor_path = snapshot_download(repo_id=config["tf_extractor_repoid"], use_auth_token = auth_key)
+        else:
+            extractor_path = hf_hub_download(repo_id=config["coreml_extractor_repoid"],
+                                             filename=config["coreml_extractor_path"], use_auth_token = auth_key)
+
+        classifier_path = hf_hub_download(repo_id=config["coreml_classifier_repoid"], filename=config["coreml_classifier_path"],
+                                          use_auth_token = auth_key)
+
+        print(f"Loading extractor...{extractor_path}")
+        if use_tf:
+            self.extractor = tf.saved_model.load(os.path.join(extractor_path, config["tf_extractor_path"]))
+        else:
+            self.extractor = ct.models.MLModel(extractor_path)
+
+        print(f"Loading classifier...{classifier_path}")
+        self.classifier = ct.models.MLModel(classifier_path)
+
+        if use_tf:
+            self.make_keras_model()
+
+    #unquantizes values if quantized
+    def realize_weights(self, nnWeights, width):
+        if nnWeights.quantization.numberOfBits == 0:
+            if len(nnWeights.float16Value) > 0:
+                weights = np.frombuffer(nnWeights.float16Value, dtype=np.float16)
+                print(f"found 16 bit {len(nnWeights.float16Value)/2} values")
+            else:
+                weights = np.array(nnWeights.floatValue)
+        elif nnWeights.quantization.numberOfBits == 8:
+            scales = np.array(nnWeights.quantization.linearQuantization.scale)
+            biases = np.array(nnWeights.quantization.linearQuantization.bias)
+            quantized = nnWeights.rawValue
+            classes = len(scales)
+            weights = []
+            for i in range(0,classes):
+                scale = scales[i]
+                bias = biases[i]
+                for j in range(0,width):
+                    weights.append(quantized[i*width + j] * scale + bias)
+            weights = np.array(weights)
+        else:
+            print(f"Unsupported quantization: {nnWeights.quantization.numberOfBits}")
+            weights = None
+        return weights
+
+    #Only MacOS can run inference on CoreML models. Convert it to tensorflow to match the tf feature extractor
+    def make_keras_model(self):
+        spec = self.classifier.get_spec()
+        nnClassifier = spec.neuralNetworkClassifier
+        labels = nnClassifier.stringClassLabels.vector
+        input = tf.keras.Input(shape = (1280))
+        if "activation" in self.config:
+            activation = self.config['activation']
+        else:
+            activation = "sigmoid" if len(labels) == 1 else "softmax"
+        x = tf.keras.layers.Dense(len(labels), activation = activation)(input)
+        model = tf.keras.Model(input,x, trainable = False)
+        weights = self.realize_weights(nnClassifier.layers[0].innerProduct.weights,1280)
+        weights = weights.reshape((len(labels),1280))
+        weights = weights.T
+
+        bias = self.realize_weights(nnClassifier.layers[0].innerProduct.bias, len(labels))
+        bias.reshape(1,len(labels))
+        model.set_weights([weights,bias])
+        self.tf_model = model
+        self.labels = labels
+        import math
+
+    def softmax_dict(self, input_dict):
+        """
+        Compute the softmax of a dictionary of values.
+
+        Args:
+            input_dict (dict): A dictionary with numerical values.
+
+        Returns:
+            dict: A dictionary with the same keys where the values are the softmax of the input values.
+        """
+        # Compute the exponential of all the values
+        exp_values = {k: math.exp(v) for k, v in input_dict.items()}
+
+        # Compute the sum of all exponential values
+        sum_exp_values = sum(exp_values.values())
+
+        # Compute the softmax by dividing each exponential value by the sum of all exponential values
+        softmax_values = {k: v / sum_exp_values for k, v in exp_values.items()}
+
+        return softmax_values
+
+
+
+    def classify(self,resized):
+        if self.use_tf:
+            image = tf.image.convert_image_dtype(resized, tf.float32)
+            image = tf.expand_dims(image, 0)
+            features = self.extractor.signatures['serving_default'](image)
+            input = {"input_1":features["output_1"]}
+            output = self.tf_model.predict(input)
+            results = {}
+            for i,label in enumerate(self.labels):
+                results[label] = output[0][i]
+        else:
+            features = self.extractor.predict({"image":resized})
+            features = features["Identity"]
+            output = self.classifier.predict({"features":features[0]})
+            results = output["Identity"]
+        if "activation" in self.config and self.config["activation"] == "softmax":
+            results = self.softmax_dict(results)
+
+        return results
+
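realize_weights expands an 8-bit linearly quantized weight blob row by row as q * scale + bias. A standalone sketch of that same expansion follows; the arrays are made-up stand-ins for the fields read from the CoreML spec (rawValue, linearQuantization.scale/bias), and the nested loops are replaced with a vectorized numpy form.

# Illustrative only: vectorized equivalent of the 8-bit dequantization loop in realize_weights.
# The arrays below are hypothetical stand-ins for nnWeights.rawValue and nnWeights.quantization.linearQuantization.
import numpy as np

classes, width = 2, 4                                    # toy sizes; the real head uses width = 1280
quantized = np.arange(classes * width, dtype=np.uint8)   # stand-in for nnWeights.rawValue
scales = np.array([0.5, 0.25])                           # stand-in for linearQuantization.scale (one per class)
biases = np.array([-1.0, 2.0])                           # stand-in for linearQuantization.bias (one per class)

# Row i, column j: quantized[i*width + j] * scales[i] + biases[i], same as the loops in realize_weights
weights = quantized.reshape(classes, width) * scales[:, None] + biases[:, None]
print(weights.flatten())                                 # same order the loop appends values in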
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+coremltools==7.2
+tensorflow==2.15
test1.jpg
ADDED
test2.jpg
ADDED
test3.jpg
ADDED