McClain committed
Commit f24c655 · verified · 1 Parent(s): 84e65fb

Upload folder using huggingface_hub

.python-version ADDED
@@ -0,0 +1 @@
+3.9
__pycache__/handler.cpython-311.pyc ADDED
Binary file (3.6 kB).
 
__pycache__/handler.cpython-39.pyc CHANGED
Binary files a/__pycache__/handler.cpython-39.pyc and b/__pycache__/handler.cpython-39.pyc differ
 
handler.py CHANGED
@@ -5,6 +5,8 @@ from typing import Dict, Any, List
 import requests
 import numpy as np
 from fashion_clip.fashion_clip import FashionCLIP
+from io import BytesIO
+
 
 
 class EndpointHandler:
@@ -23,30 +25,35 @@ class EndpointHandler:
             List[Dict[str, Any]]: The embeddings for the text and/or images.
         """
         # Extract text and images from the input data
-        texts = data.get("text", [])
-        images = data.get("image", [])
+        texts = data['inputs'].get("text", [])
+        images = data['inputs'].get("image", [])
+
 
         # Convert image URLs to PIL Images if needed
         images = [self._load_image(img) for img in images]
 
-        image_embeddings = self.fclip.encode_images(images, batch_size=32)
-        text_embeddings = self.fclip.encode_text(texts, batch_size=32)
-
-        image_embeddings = image_embeddings/np.linalg.norm(image_embeddings, ord=2, axis=-1, keepdims=True)
-        text_embeddings = text_embeddings/np.linalg.norm(text_embeddings, ord=2, axis=-1, keepdims=True)
+        results = {}
 
-        results = {
-            "image_embeddings": image_embeddings.tolist(),
-            "text_embeddings": text_embeddings.tolist()
-        }
+        if images:
+            image_embeddings = self.fclip.encode_images(images, batch_size=32)
+            image_embeddings = image_embeddings/np.linalg.norm(image_embeddings, ord=2, axis=-1, keepdims=True)
+            results["image_embeddings"] = image_embeddings.tolist()
+
+        if texts:
+            text_embeddings = self.fclip.encode_text(texts, batch_size=32)
+            text_embeddings = text_embeddings/np.linalg.norm(text_embeddings, ord=2, axis=-1, keepdims=True)
+            results["text_embeddings"] = text_embeddings.tolist()
 
         return results
 
     def _load_image(self, img):
-        """Helper function to load an image from a URL, PIL Image, or numpy array."""
+        """Helper function to load an image from a URL, PIL Image, numpy array, or bytes."""
         if isinstance(img, str):
             # If the image is a URL
             img = Image.open(requests.get(img, stream=True).raw)
+        elif isinstance(img, bytes):
+            # If the image is in bytes
+            img = Image.open(BytesIO(img))
         elif isinstance(img, Image.Image):
             # If the image is already a PIL Image
             pass
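
For context on how the updated handler is meant to be called: __call__ now reads text and image inputs from data['inputs'] and returns L2-normalized embeddings only for the modalities actually supplied. The sketch below is a hypothetical local smoke test, not part of this commit; it assumes EndpointHandler follows the usual Hugging Face Inference Endpoints __init__(self, path="") convention, and the model path and image URL are placeholders.

# Hypothetical local smoke test for the updated handler (not part of this commit).
# Assumes handler.py exposes EndpointHandler(path) as in standard Inference
# Endpoints custom handlers, and that fashion-clip weights are available locally.
import numpy as np
from handler import EndpointHandler

handler = EndpointHandler(path=".")  # placeholder model path

payload = {
    "inputs": {
        "text": ["red floral summer dress", "black leather boots"],
        "image": ["https://example.com/dress.jpg"],  # placeholder image URL
    }
}

result = handler(payload)

# Both embedding sets are L2-normalized by the handler, so a plain dot
# product between them is already a cosine similarity.
text_emb = np.array(result["text_embeddings"])
image_emb = np.array(result["image_embeddings"])
print(text_emb @ image_emb.T)  # (2, 1) matrix of text-image similarities

Because the handler normalizes each embedding to unit length before returning it, downstream code can rank matches with a matrix product alone, without recomputing norms.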
test_endpoint.ipynb CHANGED
The diff for this file is too large to render. See raw diff
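
The notebook diff is not rendered above, but exercising a deployed endpoint against the new payload shape reduces to a single POST request. Only the JSON structure ({"inputs": {"text": [...], "image": [...]}}) comes from the handler change; the endpoint URL, token, and image URL below are placeholders.

# Hypothetical request against a deployed Inference Endpoint (placeholders throughout).
import requests

ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"  # placeholder
HF_TOKEN = "hf_..."  # placeholder token

response = requests.post(
    ENDPOINT_URL,
    headers={
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
    },
    json={
        "inputs": {
            "text": ["red floral summer dress"],
            "image": ["https://example.com/dress.jpg"],  # placeholder image URL
        }
    },
)
response.raise_for_status()
embeddings = response.json()
print(list(embeddings.keys()))  # ["image_embeddings", "text_embeddings"]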