---
# Forked from cloudera/HuggingFace-Spaces
# huggingface-space-catalog-default.yaml
name: Hugging Face
entries:
- title: IDEFICS Playground
label: Idefics
short_description: This demo showcases IDEFICS, a open-access large visual language model.
long_description: >-
This demo showcases IDEFICS, a open-access large visual language model. Like GPT-4, the multimodal model accepts arbitrary sequences of image and text inputs and produces text outputs. IDEFICS can answer questions about images, describe visual content, create stories grounded in multiple images, etc. IDEFICS (which stands for Image-aware Decoder Enhanced à la Flamingo with Interleaved Cross-attentionS) is an open-access reproduction of Flamingo, a closed-source visual language model developed by Deepmind. IDEFICS was built solely on publicly available data and models. It is currently the only visual language model of this scale (80 billion parameters) that is available in open-access.
tags:
- Gradio
- Space
- Idefics
git_url: "https://huggingface.co/spaces/HuggingFaceM4/idefics_playground"
is_prototype: true
is_huggingface_space: true
enabled: false
environment_variables:
cpu:
default: 2
description: "Number of CPUs"
memory:
default: 16
description: "Memory in GB"
gpu:
default: 0
description: "Number of GPUs"
HF_AUTH_TOKEN:
default: ""
description: "Hugging Face Token (HuggingFace->Settings->Access Tokens)"
required: true
tooltip: "Get your token from HuggingFace->Settings->Access Tokens"
- title: Code Llama Playground 13B
label: Llama-13B
short_description: This is a demo to generate text and code with the following Code Llama model (13B).
long_description: >-
This is a demo to generate text and code with the following Code Llama model (13B). Please note that this
model is not designed for instruction purposes but for code completion.
tags:
- Gradio
- Space
- Llama
- 13B
git_url: "https://huggingface.co/spaces/codellama/codellama-playground"
is_prototype: true
is_huggingface_space: true
environment_variables:
cpu:
default: 2
description: "Number of CPUs"
memory:
default: 16
description: "Memory in GB"
gpu:
default: 0
description: "Number of GPUs"
HF_TOKEN:
default: ""
description: "Hugging Face Token (HuggingFace->Settings->Access Tokens)"
required: true
tooltip: "Get your token from HuggingFace->Settings->Access Tokens"
- title: Open LLM Leaderboard
label: open-LLM
short_description: The Open LLM Leaderboard aims to track, rank and evaluate open LLMs and chatbots.
long_description: >-
The Open LLM Leaderboard aims to track, rank and evaluate open LLMs and chatbots. Submit a model for automated
evaluation on the GPU cluster on the "Submit" page! The leaderboard's backend runs the great Eleuther AI
Language Model Evaluation Harness - https://github.com/EleutherAI/lm-evaluation-harness
tags:
- Gradio
- Space
- LLM
git_url: "https://huggingface.co/spaces/smothiki/open_llm_leaderboard"
is_prototype: true
is_huggingface_space: true
environment_variables:
cpu:
default: 2
description: "Number of CPUs"
memory:
default: 16
description: "Memory in GB"
gpu:
default: 0
description: "Number of GPUs"
HF_TOKEN:
default: ""
description: "Hugging Face Token (HuggingFace->Settings->Access Tokens)"
required: true
tooltip: "Get your token from HuggingFace->Settings->Access Tokens"
- title: Mistral 7B Instruct
label: Mistral-7B
short_description: In this demo, you can chat with Mistral-7B-Instruct model.
long_description: >-
In this demo, you can chat with Mistral-7B-Instruct model. Mistral-7B-v0.1 is a
decoder-based LM with the following architectural choices - Sliding Window Attention,
GQA (Grouped Query Attention) & Byte-fallback BPE tokenizer.
tags:
- Gradio
- Space
- Llama
- 13B
git_url: "https://huggingface.co/spaces/osanseviero/mistral-super-fast"
is_prototype: true
is_huggingface_space: true
environment_variables:
cpu:
default: 2
description: "Number of CPUs"
memory:
default: 16
description: "Memory in GB"
gpu:
default: 0
description: "Number of GPUs"
HF_TOKEN:
default: ""
description: "Hugging Face Token (HuggingFace->Settings->Access Tokens)"
required: true
tooltip: "Get your token from HuggingFace->Settings->Access Tokens"
HUGGING_FACE_HUB_TOKEN:
default: ""
description: "Hugging Face Token (HuggingFace->Settings->Access Tokens)"
required: true
tooltip: "Get your token from HuggingFace->Settings->Access Tokens"
- title: Chat with DeepSeek Coder 33B
label: DeepSeek-33B
short_description: In this demo, you can make use of DeepSeek 33B parameters model.
long_description: >-
In this demo, you can chat with DeepSeek-33B model which is a 33B parameters
model that has been fine-tuned for chat instructions. More details can be found
here - https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct
tags:
- Gradio
- Space
- DeepSeek
- 33B
git_url: "https://huggingface.co/spaces/deepseek-ai/deepseek-coder-33b-instruct"
is_prototype: true
is_huggingface_space: true
environment_variables:
cpu:
default: 4
description: "Number of CPUs"
memory:
default: 32
description: "Memory in GB"
gpu:
default: 4
description: "Number of GPUs"
HF_TOKEN:
default: ""
description: "Hugging Face Token (HuggingFace->Settings->Access Tokens)"
required: false
tooltip: "Get your token from HuggingFace->Settings->Access Tokens"
HUGGING_FACE_HUB_TOKEN:
default: "my_default"
description: "my_description"
required: true
tooltip: "my_tooltip"
- title: Can You Run It? LLM version
label: Run-llm-version
short_description: This is a demo to check if LLM can be executed,
long_description: >-
This is a demo to check if LLM can be executed.
tags:
- Streamlit
- Space
- LLM
git_url: "https://huggingface.co/spaces/Vokturz/can-it-run-llm"
is_prototype: true
is_huggingface_space: true