fix: replace transform ndarray with transform blob (#910)
* fix: replace transform ndarray with transform blob

* fix: tests

* fix: check empty blob
ZiniuYu committed Apr 14, 2023
Parent: 1888ef6 · Commit: 35733a0
Showing 5 changed files with 10 additions and 11 deletions.
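
The core of the change is swapping clip._transform_ndarray (which expects an already-decoded image array) for clip._transform_blob (which expects the raw encoded bytes and decodes them itself). The actual definitions live in server/clip_server/model/clip.py and are not part of this diff; the snippet below is only an illustrative sketch of what a blob-based transform typically looks like with PIL and torchvision, using CLIP's published normalization constants.

# Illustrative sketch only -- not the repo's actual _transform_blob implementation.
from io import BytesIO

from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor


def transform_blob_sketch(n_px: int) -> Compose:
    """Build a preprocessing pipeline that accepts raw encoded image bytes."""
    return Compose(
        [
            lambda blob: Image.open(BytesIO(blob)).convert('RGB'),  # decode bytes
            Resize(n_px),
            CenterCrop(n_px),
            ToTensor(),  # HWC uint8 -> CHW float32 in [0, 1]
            Normalize(
                (0.48145466, 0.4578275, 0.40821073),  # CLIP image mean
                (0.26862954, 0.26130258, 0.27577711),  # CLIP image std
            ),
        ]
    )
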
server/clip_server/executors/clip_onnx.py (2 changes: 1 addition & 1 deletion)
@@ -68,7 +68,7 @@ def __init__(
         self._model = CLIPOnnxModel(name, model_path, dtype)
         self._tokenizer = Tokenizer(name)

-        self._image_transform = clip._transform_ndarray(self._model.image_size)
+        self._image_transform = clip._transform_blob(self._model.image_size)

         # define the priority order for the execution providers
         providers = ['CPUExecutionProvider']

server/clip_server/executors/clip_tensorrt.py (2 changes: 1 addition & 1 deletion)
@@ -68,7 +68,7 @@ def __init__(
         self._model.start_engines()

         self._tokenizer = Tokenizer(name)
-        self._image_transform = clip._transform_ndarray(self._model.image_size)
+        self._image_transform = clip._transform_blob(self._model.image_size)

         if not self.tracer:
             self.tracer = NoOpTracer()

server/clip_server/executors/clip_torch.py (2 changes: 1 addition & 1 deletion)
@@ -92,7 +92,7 @@ def __init__(
             name, device=self._device, jit=jit, dtype=dtype, **kwargs
         )
         self._tokenizer = Tokenizer(name)
-        self._image_transform = clip._transform_ndarray(self._model.image_size)
+        self._image_transform = clip._transform_blob(self._model.image_size)

         if not self.tracer:
             self.tracer = NoOpTracer()

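All three executors (ONNX, TensorRT, and PyTorch) make the same one-line swap, so each backend now hands raw encoded bytes straight to its image transform instead of a decoded array. A hypothetical smoke test of that call path, assuming the clip_server package is installed and cat.png is a local image file:

from clip_server.model import clip

preprocess_fn = clip._transform_blob(224)  # same call the executors now make

with open('cat.png', 'rb') as f:  # hypothetical local image
    raw_bytes = f.read()

pixel = preprocess_fn(raw_bytes)  # decode + resize + crop + normalize in one step
print(pixel.shape)  # expected: torch.Size([3, 224, 224])
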
server/clip_server/executors/helper.py (11 changes: 5 additions & 6 deletions)
@@ -33,14 +33,13 @@ def preproc_image(

     for d in da:
         content = d.content
-
-        if d.blob:
-            d.convert_blob_to_image_tensor()
-        elif d.tensor is None and d.uri:
+        if d.tensor is not None:
+            d.convert_image_tensor_to_blob()
+        elif d.content_type != 'blob' and d.uri:
             # in case user uses HTTP protocol and send data via curl not using .blob (base64), but in .uri
-            d.load_uri_to_image_tensor()
+            d.load_uri_to_blob()

-        tensors_batch.append(preprocess_fn(d.tensor).detach())
+        tensors_batch.append(preprocess_fn(d.blob).detach())

         # recover doc content
         d.content = content

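With the new branching in preproc_image, every document ends up carrying encoded bytes in .blob before preprocess_fn runs, whichever way the image arrived. A usage sketch of the three input styles the loop now handles, written against the docarray v0.x Document API the server already uses (the local file name is hypothetical):

import numpy as np
from docarray import Document, DocumentArray

da = DocumentArray(
    [
        # already raw bytes (e.g. a base64 payload sent over HTTP): left as-is
        Document(blob=open('cat.png', 'rb').read()),
        # a decoded image tensor: converted via d.convert_image_tensor_to_blob()
        Document(tensor=np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)),
        # only a URI: fetched with d.load_uri_to_blob() rather than
        # d.load_uri_to_image_tensor() as before
        Document(uri='https://clip-as-service.jina.ai/_static/favicon.png'),
    ]
)
# preproc_image(da, preprocess_fn, ...) then applies preprocess_fn(d.blob) to
# each document and batches the resulting tensors.
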
tests/test_helper.py (4 changes: 2 additions & 2 deletions)
@@ -87,15 +87,15 @@ def test_split_img_txt_da(inputs):
             [
                 Document(
                     uri='https://clip-as-service.jina.ai/_static/favicon.png',
-                ).load_uri_to_blob(),
+                ).load_uri_to_image_tensor(),
             ]
         )
     ],
 )
 def test_preproc_image(inputs):
     from clip_server.model import clip

-    preprocess_fn = clip._transform_ndarray(224)
+    preprocess_fn = clip._transform_blob(224)
     da, pixel_values = preproc_image(inputs, preprocess_fn, drop_image_content=True)
     assert len(da) == 1
     assert not da[0].blob

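The test fixture switches from a pre-loaded blob to a pre-loaded image tensor, so test_preproc_image now exercises the new tensor-to-blob branch rather than the removed blob-to-tensor one. A minimal standalone check of that conversion (docarray v0.x API, illustrative only):

import numpy as np
from docarray import Document

d = Document(tensor=np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
d.convert_image_tensor_to_blob()  # encodes the tensor into raw image bytes

assert isinstance(d.blob, bytes) and len(d.blob) > 0  # the blob must not be empty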
