Skip to content

Commit

Permalink
Update organization
Browse files Browse the repository at this point in the history
  • Loading branch information
Niels Rogge authored and Niels Rogge committed Nov 7, 2022
1 parent 545b026 commit 5411c16
Show file tree
Hide file tree
Showing 2 changed files with 15 additions and 17 deletions.
29 changes: 14 additions & 15 deletions src/transformers/models/clipseg/modeling_clipseg.py
Expand Up @@ -39,11 +39,10 @@
logger = logging.get_logger(__name__)


_CHECKPOINT_FOR_DOC = "nielsr/clipseg-rd64-refined"
_CHECKPOINT_FOR_DOC = "CIDAS/clipseg-rd64-refined"

CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = [
# TODO update organization
"nielsr/clipseg-rd64-refined",
"CIDAS/clipseg-rd64-refined",
# See all CLIPSeg models at https://huggingface.co/models?filter=clipseg
]

Expand Down Expand Up @@ -807,8 +806,8 @@ def forward(
```python
>>> from transformers import CLIPTokenizer, CLIPSegTextModel
>>> tokenizer = CLIPTokenizer.from_pretrained("nielsr/clipseg-rd64-refined")
>>> model = CLIPSegTextModel.from_pretrained("nielsr/clipseg-rd64-refined")
>>> tokenizer = CLIPTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> model = CLIPSegTextModel.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
Expand Down Expand Up @@ -918,8 +917,8 @@ def forward(
>>> import requests
>>> from transformers import CLIPSegProcessor, CLIPSegVisionModel
>>> processor = CLIPSegProcessor.from_pretrained("nielsr/clipseg-rd64-refined")
>>> model = CLIPSegVisionModel.from_pretrained("nielsr/clipseg-rd64-refined")
>>> processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> model = CLIPSegVisionModel.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
Expand Down Expand Up @@ -994,8 +993,8 @@ def get_text_features(
```python
>>> from transformers import CLIPTokenizer, CLIPSegModel
>>> tokenizer = CLIPTokenizer.from_pretrained("nielsr/clipseg-rd64-refined")
>>> model = CLIPSegModel.from_pretrained("nielsr/clipseg-rd64-refined")
>>> tokenizer = CLIPTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
Expand Down Expand Up @@ -1041,8 +1040,8 @@ def get_image_features(
>>> import requests
>>> from transformers import CLIPSegProcessor, CLIPSegModel
>>> processor = CLIPSegProcessor.from_pretrained("nielsr/clipseg-rd64-refined")
>>> model = CLIPSegModel.from_pretrained("nielsr/clipseg-rd64-refined")
>>> processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
Expand Down Expand Up @@ -1093,8 +1092,8 @@ def forward(
>>> import requests
>>> from transformers import CLIPSegProcessor, CLIPSegModel
>>> processor = CLIPSegProcessor.from_pretrained("nielsr/clipseg-rd64-refined")
>>> model = CLIPSegModel.from_pretrained("nielsr/clipseg-rd64-refined")
>>> processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
Expand Down Expand Up @@ -1403,8 +1402,8 @@ def forward(
>>> from PIL import Image
>>> import requests
>>> processor = CLIPSegProcessor.from_pretrained("nielsr/clipseg-rd64-refined")
>>> model = CLIPSegForImageSegmentation.from_pretrained("nielsr/clipseg-rd64-refined")
>>> processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
Expand Down
3 changes: 1 addition & 2 deletions tests/models/clipseg/test_modeling_clipseg.py
Expand Up @@ -706,8 +706,7 @@ def prepare_img():
class CLIPSegModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_image_segmentation(self):
# TODO update to appropriate organization
model_name = "nielsr/clipseg-rd64-refined"
model_name = "CIDAS/clipseg-rd64-refined"
processor = CLIPSegProcessor.from_pretrained(model_name)
model = CLIPSegForImageSegmentation.from_pretrained(model_name).to(torch_device)

Expand Down

0 comments on commit 5411c16

Please sign in to comment.