docs(standardization): update categorical listings
- fix duplicated entries across the README front matter and body
- list 'prithivMLmods/Deepfake-Detection-Exp-02-22' where it belongs; it was previously misplaced between the two description sections
- call out 'prithivMLmods/Deepfake-Detection-Exp-02-22' explicitly
- enumerate the visible models to clearly distinguish them from those of the previous instance
fix(model): augment model selections
- add 'ideepankarsharma2003/AI_ImageClassification_MidjourneyV6_SDXL' as a new "model_6" entry in MODEL_PATHS
refactor(model): consolidate model usage
- load image_processor_6, model_6, and clf_6 for the newly added 'ideepankarsharma2003/AI_ImageClassification_MidjourneyV6_SDXL' checkpoint and call the new classifier from the prediction function
- extend the load_models() return statement and the module-level unpacking so both include the new classifier
refactor(model): improve label classification
- add the previously missing CLASS_NAMES entry for model_6 (['ai_gen', 'human'])
README.md:

```diff
@@ -13,6 +13,7 @@ preload_from_hub:
   - cmckinle/sdxl-flux-detector
   - Organika/sdxl-detector
   - prithivMLmods/Deep-Fake-Detector-v2-Model
+  - prithivMLmods/Deepfake-Detection-Exp-02-22
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
```
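For context, `preload_from_hub` entries are downloaded into the Space image at build time, so the corresponding `from_pretrained` calls resolve from the local cache at runtime. A minimal, hypothetical check (not part of this commit) that the newly listed checkpoint is indeed cached, assuming `huggingface_hub` is available:

```python
from huggingface_hub import snapshot_download

# local_files_only=True never touches the network: it returns the cached path
# if the preload_from_hub step fetched the repo, and raises otherwise.
cached_path = snapshot_download(
    "prithivMLmods/Deepfake-Detection-Exp-02-22",
    local_files_only=True,
)
print(cached_path)
```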
Application code:

```diff
@@ -26,7 +26,8 @@ MODEL_PATHS = {
     "model_3": "Organika/sdxl-detector",
     "model_4": "cmckinle/sdxl-flux-detector",
     "model_5": "prithivMLmods/Deep-Fake-Detector-v2-Model",
-    "model_5b": "prithivMLmods/Deepfake-Detection-Exp-02-22"
+    "model_5b": "prithivMLmods/Deepfake-Detection-Exp-02-22",
+    "model_6": "ideepankarsharma2003/AI_ImageClassification_MidjourneyV6_SDXL"
 }
 
 CLASS_NAMES = {
```
```diff
@@ -35,7 +36,9 @@ CLASS_NAMES = {
     "model_3": ['AI', 'Real'],
     "model_4": ['AI', 'Real'],
     "model_5": ['Realism', 'Deepfake'],
-    "model_5b": ['Real', 'Deepfake']
+    "model_5b": ['Real', 'Deepfake'],
+    "model_6": ['ai_gen', 'human'],
+
 }
 
 # Load models and processors
```
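These commits keep MODEL_PATHS and CLASS_NAMES keyed identically ("model_5b", "model_6"). Purely as an illustration of that invariant, a small guard one could place after both dicts in the application code; this is a hypothetical addition, not part of the diff:

```python
# Hypothetical guard: every configured checkpoint needs a matching label list,
# otherwise CLASS_NAMES["model_X"] lookups in the prediction code will fail.
missing = MODEL_PATHS.keys() - CLASS_NAMES.keys()
assert not missing, f"models without class names: {sorted(missing)}"
```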
```diff
@@ -45,6 +48,11 @@ def load_models():
     model_1 = model_1.to(device)
     clf_1 = pipeline(model=model_1, task="image-classification", image_processor=image_processor_1, device=device)
 
+    image_processor_6 = AutoImageProcessor.from_pretrained(MODEL_PATHS["model_6"], use_fast=True)
+    model_6 = Swinv2ForImageClassification.from_pretrained(MODEL_PATHS["model_6"])
+    model_6 = model_6.to(device)
+    clf_6 = pipeline(model=model_6, task="image-classification", image_processor=image_processor_6, device=device)
+
     clf_2 = pipeline("image-classification", model=MODEL_PATHS["model_2"], device=device)
 
     feature_extractor_3 = AutoFeatureExtractor.from_pretrained(MODEL_PATHS["model_3"], device=device)
```
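An image-classification `pipeline` built this way takes a PIL image and returns a list of label/score dicts. A standalone sketch of calling the new checkpoint, using the plain string form (as the app does for `clf_2`/`clf_5`) rather than the explicit `Swinv2ForImageClassification`/`AutoImageProcessor` pair used for `clf_6`; the file name is a placeholder:

```python
from PIL import Image
from transformers import pipeline

# Pulls the checkpoint from the Hub unless it is already cached.
clf = pipeline(
    "image-classification",
    model="ideepankarsharma2003/AI_ImageClassification_MidjourneyV6_SDXL",
)

img = Image.open("example.jpg").convert("RGB")  # placeholder input image
print(clf(img))
# A list such as [{'label': ..., 'score': ...}, ...]; the CLASS_NAMES entry
# above expects the labels 'ai_gen' and 'human' for this model.
```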
```diff
@@ -56,9 +64,9 @@ def load_models():
     clf_5 = pipeline("image-classification", model=MODEL_PATHS["model_5"], device=device)
     clf_5b = pipeline("image-classification", model=MODEL_PATHS["model_5b"], device=device)
 
-    return clf_1, clf_2, feature_extractor_3, model_3, feature_extractor_4, model_4, clf_5, clf_5b
+    return clf_1, clf_2, feature_extractor_3, model_3, feature_extractor_4, model_4, clf_5, clf_5b, clf_6
 
-clf_1, clf_2, feature_extractor_3, model_3, feature_extractor_4, model_4, clf_5, clf_5b = load_models()
+clf_1, clf_2, feature_extractor_3, model_3, feature_extractor_4, model_4, clf_5, clf_5b, clf_6 = load_models()
 
 @spaces.GPU(duration=10)
 def predict_with_model(img_pil, clf, class_names, confidence_threshold, model_name, model_id, feature_extractor=None):
```
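The `@spaces.GPU(duration=10)` decorator in the surrounding code comes from the `spaces` package used on ZeroGPU Spaces: the decorated function only has a GPU attached while it runs, with `duration` as the expected runtime in seconds. A minimal sketch of the pattern, which assumes it is executed inside a ZeroGPU Space:

```python
import spaces
import torch

@spaces.GPU(duration=10)  # request a GPU for roughly 10 seconds per call
def gpu_sum(x: torch.Tensor) -> float:
    # Inside the decorated function, CUDA is available.
    return x.to("cuda").sum().item()

print(gpu_sum(torch.ones(4)))
```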
```diff
@@ -110,6 +118,7 @@ def predict_image(img, confidence_threshold):
     label_4, result_4output = predict_with_model(img_pil, model_4, CLASS_NAMES["model_4"], confidence_threshold, "SDXL + FLUX", 4, feature_extractor_4)
     label_5, result_5output = predict_with_model(img_pilvits, clf_5, CLASS_NAMES["model_5"], confidence_threshold, "ViT-base Newcomer", 5)
     label_5b, result_5boutput = predict_with_model(img_pilvits, clf_5b, CLASS_NAMES["model_5b"], confidence_threshold, "ViT-base Newcomer", 6)
+    label_6, result_6output = predict_with_model(img_pilvits, clf_6, CLASS_NAMES["model_6"], confidence_threshold, "Swin Midjourney/SDXL", 6)
 
     combined_results = {
         "SwinV2/detect": label_1,
```
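The body of `predict_with_model` is not part of this diff, so exactly how it combines a `CLASS_NAMES` entry with `confidence_threshold` is not shown. Purely as a hypothetical sketch of that kind of logic (names and behaviour here are assumptions, not the app's implementation):

```python
def label_from_scores(scores, class_names, confidence_threshold):
    # scores: image-classification pipeline output, e.g.
    # [{"label": "ai_gen", "score": 0.91}, {"label": "human", "score": 0.09}]
    top = max(scores, key=lambda s: s["score"])
    if top["score"] < confidence_threshold:
        return "Uncertain"
    # Only report labels the app has configured for this model.
    return top["label"] if top["label"] in class_names else "Unknown label"

print(label_from_scores(
    [{"label": "ai_gen", "score": 0.91}, {"label": "human", "score": 0.09}],
    ['ai_gen', 'human'],
    0.5,
))  # -> ai_gen
```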
```diff
@@ -117,10 +126,11 @@ def predict_image(img, confidence_threshold):
         "Swin/SDXL": label_3,
         "Swin/SDXL-FLUX": label_4,
         "prithivMLmods": label_5,
-        "prithivMLmods-2-22": label_5b
+        "prithivMLmods-2-22": label_5b,
+        "SwinMidSDXL": label_6
     }
 
-    combined_outputs = [result_1output, result_2output, result_3output, result_4output, result_5output, result_5boutput]
+    combined_outputs = [result_1output, result_2output, result_3output, result_4output, result_5output, result_5boutput, result_6output]
     return img_pil, combined_outputs
     # Define a function to generate the HTML content
 
```
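`predict_image` returns the processed PIL image plus the list of per-model outputs; how the Space presents them is outside this diff (the trailing comment refers to HTML generation). As a purely hypothetical sketch of wiring such a function into a Gradio interface (the component choices are assumptions, not the app's actual UI):

```python
import gradio as gr

demo = gr.Interface(
    fn=predict_image,  # the function shown in the hunks above
    inputs=[
        gr.Image(type="pil", label="Input image"),
        gr.Slider(0.0, 1.0, value=0.75, label="Confidence threshold"),
    ],
    outputs=[
        gr.Image(type="pil", label="Processed image"),
        gr.JSON(label="Per-model outputs"),
    ],
)

if __name__ == "__main__":
    demo.launch()
```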