Skip to content

Commit 41f9658

Browse files
author
Jacob
committed
reformat files with black
1 parent 5239eae commit 41f9658

File tree

6 files changed

+42
-25
lines changed

6 files changed

+42
-25
lines changed

Diff for: clip_retrieval/clip_back.py

+26-19
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
"""Clip back: host a knn service using clip as an encoder"""
22

3-
43
from typing import Callable, Dict, Any, List
54
from flask import Flask, request, make_response
65
from flask_restful import Resource, Api
@@ -809,27 +808,35 @@ def dict_to_clip_options(d, clip_options):
809808
indice_folder=d["indice_folder"] if "indice_folder" in d else clip_options.indice_folder,
810809
clip_model=d["clip_model"] if "clip_model" in d else clip_options.clip_model,
811810
enable_hdf5=d["enable_hdf5"] if "enable_hdf5" in d else clip_options.enable_hdf5,
812-
enable_faiss_memory_mapping=d["enable_faiss_memory_mapping"]
813-
if "enable_faiss_memory_mapping" in d
814-
else clip_options.enable_faiss_memory_mapping,
811+
enable_faiss_memory_mapping=(
812+
d["enable_faiss_memory_mapping"]
813+
if "enable_faiss_memory_mapping" in d
814+
else clip_options.enable_faiss_memory_mapping
815+
),
815816
columns_to_return=d["columns_to_return"] if "columns_to_return" in d else clip_options.columns_to_return,
816-
reorder_metadata_by_ivf_index=d["reorder_metadata_by_ivf_index"]
817-
if "reorder_metadata_by_ivf_index" in d
818-
else clip_options.reorder_metadata_by_ivf_index,
819-
enable_mclip_option=d["enable_mclip_option"]
820-
if "enable_mclip_option" in d
821-
else clip_options.enable_mclip_option,
817+
reorder_metadata_by_ivf_index=(
818+
d["reorder_metadata_by_ivf_index"]
819+
if "reorder_metadata_by_ivf_index" in d
820+
else clip_options.reorder_metadata_by_ivf_index
821+
),
822+
enable_mclip_option=(
823+
d["enable_mclip_option"] if "enable_mclip_option" in d else clip_options.enable_mclip_option
824+
),
822825
use_jit=d["use_jit"] if "use_jit" in d else clip_options.use_jit,
823826
use_arrow=d["use_arrow"] if "use_arrow" in d else clip_options.use_arrow,
824-
provide_safety_model=d["provide_safety_model"]
825-
if "provide_safety_model" in d
826-
else clip_options.provide_safety_model,
827-
provide_violence_detector=d["provide_violence_detector"]
828-
if "provide_violence_detector" in d
829-
else clip_options.provide_violence_detector,
830-
provide_aesthetic_embeddings=d["provide_aesthetic_embeddings"]
831-
if "provide_aesthetic_embeddings" in d
832-
else clip_options.provide_aesthetic_embeddings,
827+
provide_safety_model=(
828+
d["provide_safety_model"] if "provide_safety_model" in d else clip_options.provide_safety_model
829+
),
830+
provide_violence_detector=(
831+
d["provide_violence_detector"]
832+
if "provide_violence_detector" in d
833+
else clip_options.provide_violence_detector
834+
),
835+
provide_aesthetic_embeddings=(
836+
d["provide_aesthetic_embeddings"]
837+
if "provide_aesthetic_embeddings" in d
838+
else clip_options.provide_aesthetic_embeddings
839+
),
833840
)
834841

835842

Diff for: clip_retrieval/clip_filter.py

-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
"""clip filter is a tool to use a knn index and a image/text collection to extract interesting subsets"""
22

3-
43
import fire
54

65

Diff for: clip_retrieval/clip_inference/reader.py

+4
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ def join(new_set):
5151

5252
return keys, text_files, image_files, metadata_files
5353

54+
5455
class ImageDataset(Dataset):
5556
"""ImageDataset is a pytorch Dataset exposing image and text tensors from a folder of image and text"""
5657

@@ -114,6 +115,7 @@ def __getitem__(self, ind):
114115

115116
return output
116117

118+
117119
def get_image_dataset():
118120
"""retrieve image dataset module without importing torch at the top level"""
119121
return ImageDataset
@@ -177,10 +179,12 @@ def preprocess_dataset(item):
177179
transformed_dataset = filtered_dataset.map(preprocess_dataset, handler=wds.handlers.warn_and_continue)
178180
return transformed_dataset
179181

182+
180183
def collate_fn(batch):
181184
batch = list(filter(lambda x: x is not None, batch))
182185
return default_collate(batch)
183186

187+
184188
def dataset_to_dataloader(dataset, batch_size, num_prepro_workers, input_format):
185189
"""Create a pytorch dataloader from a dataset"""
186190

Diff for: clip_retrieval/clip_inference/runner.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -55,9 +55,9 @@ def __call__(self, i):
5555
"inference_duration": inference_duration,
5656
"write_duration": write_duration,
5757
"total_duration": end_time - begin_time,
58-
"sample_count": batch["image_tensor"].shape[0]
59-
if "image_tensor" in batch
60-
else batch["text_tokens"].shape[0],
58+
"sample_count": (
59+
batch["image_tensor"].shape[0] if "image_tensor" in batch else batch["text_tokens"].shape[0]
60+
),
6161
}
6262
)
6363
logger.end()

Diff for: tests/test_clip_client.py

+1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
"""Test the ClipClient class."""
2+
23
import logging
34
import pytest
45

Diff for: tests/test_end2end.py

+8-2
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,15 @@
1818
test_list = [
1919
["first", "https://upload.wikimedia.org/wikipedia/commons/3/3a/Cat03.jpg"],
2020
["second", "https://upload.wikimedia.org/wikipedia/commons/d/d4/Cat_March_2010-1a.jpg"],
21-
["third", "https://upload.wikimedia.org/wikipedia/commons/thumb/7/78/Baby_cat_turning.jpg/800px-Baby_cat_turning.jpg"],
21+
[
22+
"third",
23+
"https://upload.wikimedia.org/wikipedia/commons/thumb/7/78/Baby_cat_turning.jpg/800px-Baby_cat_turning.jpg",
24+
],
2225
["fourth", "https://upload.wikimedia.org/wikipedia/commons/5/51/Boscoe2_%288571299519%29.jpg"],
23-
["fifth", "https://upload.wikimedia.org/wikipedia/commons/thumb/5/50/Cat_004_%286098630659%29.jpg/1280px-Cat_004_%286098630659%29.jpg"],
26+
[
27+
"fifth",
28+
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/50/Cat_004_%286098630659%29.jpg/1280px-Cat_004_%286098630659%29.jpg",
29+
],
2430
[None, "https://upload.wikimedia.org/wikipedia/commons/e/e1/Cats_144_%287179618326%29.jpg"],
2531
]
2632

0 commit comments

Comments (0)