modelId (string, 5 to 139 chars) | author (string, 2 to 42 chars) | last_modified (timestamp[us, tz=UTC], 2020-02-15 11:33:14 to 2025-07-30 00:44:18) | downloads (int64, 0 to 223M) | likes (int64, 0 to 11.7k) | library_name (string, 536 classes) | tags (list, 1 to 4.05k items) | pipeline_tag (string, 55 classes) | createdAt (timestamp[us, tz=UTC], 2022-03-02 23:29:04 to 2025-07-30 00:43:43) | card (string, 11 to 1.01M chars) |
---|---|---|---|---|---|---|---|---|---|
ma90237509172/plasticworld
|
ma90237509172
| 2025-06-22T07:13:42Z | 0 | 0 | null |
[
"license:creativeml-openrail-m",
"region:us"
] | null | 2025-06-22T07:08:01Z |
---
license: creativeml-openrail-m
---
|
Shuu12121/CodeSearch-ModernBERT-Owl-3.0-Plus
|
Shuu12121
| 2025-06-22T07:06:58Z | 10 | 0 |
sentence-transformers
|
[
"sentence-transformers",
"safetensors",
"modernbert",
"sentence-similarity",
"feature-extraction",
"generated_from_trainer",
"dataset_size:7059200",
"loss:MultipleNegativesRankingLoss",
"en",
"dataset:code-search-net/code_search_net",
"dataset:Shuu12121/python-codesearch-dedupe-filtered-v4",
"dataset:Shuu12121/javascript-codesearch-dedupe-filtered-v4",
"dataset:Shuu12121/java-codesearch-dedupe-filtered-v4",
"dataset:Shuu12121/typescript-codesearch-dedupe-filtered-v4",
"dataset:Shuu12121/php-codesearch-dedupe-filtered-v4",
"dataset:Shuu12121/go-codesearch-dedupe-filtered-v4",
"dataset:Shuu12121/ruby-codesearch-dedupe-filtered-v4",
"dataset:Shuu12121/rust-codesearch-dedupe-filtered-v4",
"base_model:Shuu12121/CodeModernBERT-Owl-3.0",
"base_model:finetune:Shuu12121/CodeModernBERT-Owl-3.0",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] |
sentence-similarity
| 2025-06-21T16:16:58Z |
---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:7059200
- loss:MultipleNegativesRankingLoss
base_model: Shuu12121/CodeModernBERT-Owl-3.0
widget:
- source_sentence: >-
The maximum value of the slider. (default 0) <P>
@return Returns the value of the attribute, or 0, if it hasn't been set by
the JSF file.
sentences:
- |-
@Override
public UpdateSmsChannelResult updateSmsChannel(UpdateSmsChannelRequest request) {
request = beforeClientExecution(request);
return executeUpdateSmsChannel(request);
}
- |-
async function isValidOrigin(origin, sourceOrigin) {
// This will fetch the caches from https://cdn.ampproject.org/caches.json the first time it's
// called. Subsequent calls will receive a cached version.
const officialCacheList = await caches.list();
// Calculate the cache specific origin
const cacheSubdomain = `https://${await createCacheSubdomain(sourceOrigin)}.`;
// Check all caches listed on ampproject.org
for (const cache of officialCacheList) {
const cachedOrigin = cacheSubdomain + cache.cacheDomain;
if (origin === cachedOrigin) {
return true;
}
}
return false;
}
- "public java.lang.Object getMin() {\n\t\treturn (java.lang.Object) getStateHelper().eval(PropertyKeys.min, 0);\n\t}"
- source_sentence: |-
The Method from the Date.getMinutes is deprecated. This is a helper-Method.
@param date
The Date-object to get the minutes.
@return The minutes from the Date-object.
sentences:
- "public static int getMinutes(final Date date)\n\t{\n\t\tfinal Calendar calendar = Calendar.getInstance();\n\t\tcalendar.setTime(date);\n\t\treturn calendar.get(Calendar.MINUTE);\n\t}"
- "func (opts BeeOptions) Bind(name string, dst interface{}) error {\n\tv := opts.Value(name)\n\tif v == nil {\n\t\treturn errors.New(\"Option with name \" + name + \" not found\")\n\t}\n\n\treturn ConvertValue(v, dst)\n}"
- >-
public function createFor(Customer $customer, array $options = [], array
$filters = [])
{
$this->parentId = $customer->id;
return parent::rest_create($options, $filters);
}
- source_sentence: |-
Return a list of all dates from 11/12/2015 to the present.
Args:
boo: if true, list contains Numbers (20151230); if false, list contains Strings ("2015-12-30")
Returns:
list of either Numbers or Strings
sentences:
- |-
def all_days(boo):
earliest = datetime.strptime(('2015-11-12').replace('-', ' '), '%Y %m %d')
latest = datetime.strptime(datetime.today().date().isoformat().replace('-', ' '), '%Y %m %d')
num_days = (latest - earliest).days + 1
all_days = [latest - timedelta(days=x) for x in range(num_days)]
all_days.reverse()
output = []
if boo:
# Return as Integer, yyyymmdd
for d in all_days:
output.append(int(str(d).replace('-', '')[:8]))
else:
# Return as String, yyyy-mm-dd
for d in all_days:
output.append(str(d)[:10])
return output
- "public void setColSize3(Integer newColSize3) {\n\t\tInteger oldColSize3 = colSize3;\n\t\tcolSize3 = newColSize3;\n\t\tif (eNotificationRequired())\n\t\t\teNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.COLOR_SPECIFICATION__COL_SIZE3, oldColSize3, colSize3));\n\t}"
- >-
public function
deleteCompanyBusinessUnitStoreAddress(CompanyBusinessUnitStoreAddressTransfer
$companyBusinessUnitStoreAddressTransfer): void
{
$this->getFactory()
->createFosCompanyBusinessUnitStoreAddressQuery()
->findOneByIdCompanyBusinessUnitStoreAddress($companyBusinessUnitStoreAddressTransfer->getIdCompanyBusinessUnitStoreAddress())
->delete();
}
- source_sentence: |-
Returns array of basket oxarticle objects
@return array
sentences:
- |-
public function visit(NodeVisitorInterface $visitor)
{
foreach ($this->children as $child)
{
$child->visit($visitor);
}
}
- "func GetColDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) {\n\treturn getColDefaultValue(ctx, col, col.GetDefaultValue())\n}"
- |-
public function getBasketArticles()
{
$aBasketArticles = [];
/** @var \oxBasketItem $oBasketItem */
foreach ($this->_aBasketContents as $sItemKey => $oBasketItem) {
try {
$oProduct = $oBasketItem->getArticle(true);
if (\OxidEsales\Eshop\Core\Registry::getConfig()->getConfigParam('bl_perfLoadSelectLists')) {
// marking chosen select list
$aSelList = $oBasketItem->getSelList();
if (is_array($aSelList) && ($aSelectlist = $oProduct->getSelectLists($sItemKey))) {
reset($aSelList);
foreach ($aSelList as $conkey => $iSel) {
$aSelectlist[$conkey][$iSel]->selected = 1;
}
$oProduct->setSelectlist($aSelectlist);
}
}
} catch (\OxidEsales\Eshop\Core\Exception\NoArticleException $oEx) {
\OxidEsales\Eshop\Core\Registry::getUtilsView()->addErrorToDisplay($oEx);
$this->removeItem($sItemKey);
$this->calculateBasket(true);
continue;
} catch (\OxidEsales\Eshop\Core\Exception\ArticleInputException $oEx) {
\OxidEsales\Eshop\Core\Registry::getUtilsView()->addErrorToDisplay($oEx);
$this->removeItem($sItemKey);
$this->calculateBasket(true);
continue;
}
$aBasketArticles[$sItemKey] = $oProduct;
}
return $aBasketArticles;
}
- source_sentence: get test root
sentences:
- |-
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doOnDispose(Action onDispose) {
return RxJavaPlugins.onAssembly(new MaybePeek<T>(this,
Functions.emptyConsumer(), // onSubscribe
Functions.emptyConsumer(), // onSuccess
Functions.emptyConsumer(), // onError
Functions.EMPTY_ACTION, // onComplete
Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) after
ObjectHelper.requireNonNull(onDispose, "onDispose is null")
));
}
- >-
protected Object parseKeyElement(Element keyEle, BeanDefinition bd, String
defaultKeyTypeName) {
NodeList nl = keyEle.getChildNodes();
Element subElement = null;
for (int i = 0; i < nl.getLength(); i++) {
Node node = nl.item(i);
if (node instanceof Element) {
// Child element is what we're looking for.
if (subElement != null)
error("<key> element must not contain more than one value sub-element", keyEle);
else subElement = (Element) node;
}
}
return parsePropertySubElement(subElement, bd, defaultKeyTypeName);
}
- |-
function getRootPath(){
var rootPath = path.resolve('.');
while(rootPath){
if(fs.existsSync(rootPath + '/config.json')){
break;
}
rootPath = rootPath.substring(0, rootPath.lastIndexOf(path.sep));
}
return rootPath;
}
pipeline_tag: sentence-similarity
library_name: sentence-transformers
datasets:
- code-search-net/code_search_net
- Shuu12121/python-codesearch-dedupe-filtered-v4
- Shuu12121/javascript-codesearch-dedupe-filtered-v4
- Shuu12121/java-codesearch-dedupe-filtered-v4
- Shuu12121/typescript-codesearch-dedupe-filtered-v4
- Shuu12121/php-codesearch-dedupe-filtered-v4
- Shuu12121/go-codesearch-dedupe-filtered-v4
- Shuu12121/ruby-codesearch-dedupe-filtered-v4
- Shuu12121/rust-codesearch-dedupe-filtered-v4
license: apache-2.0
language:
- en
---
# 🦉 CodeModernBERT‑Owl 3.0 SentenceTransformer
A multilingual, long-context SentenceTransformer fine-tuned from the **CodeModernBERT‑Owl 3.0** encoder. It maps source code and natural-language snippets of up to 1024 tokens into 768-dimensional dense vectors for a wide range of downstream tasks, including code search, similarity computation, and clustering.
---
## 🔥 Highlights
| ⚙️ Spec | Value |
| ------------ | ------------------------------------------------------------------------------------------- |
| **Max sequence length** | 1024 tokens |
| **Embedding dimension** | 768-d, cosine similarity |
| **Pooling** | CLS token (`pooling_mode_cls_token = True`) |
| **Training data** | 7,059,200 positive pairs (CodeSearchNet + custom datasets) |
| **Loss** | MultipleNegativesRankingLoss (`scale = 20.0`) |
| **Training epochs** | 3 epochs (batch size 200, fp16) |
| **Base model** | [Shuu12121/CodeModernBERT‑Owl 3.0](https://huggingface.co/Shuu12121/CodeModernBERT-Owl-3.0) |
---
## 📊 Evaluation
### MTEB CodeSearchNet (CSN)
| Metric | COIR Version | CSN |
| ------------------------- | :---------: | :--------: |
| **Main Score (NDCG@10)** | **0.8023** | **0.8928** |
| NDCG@1 | 0.7175 | 0.8125 |
| NDCG@3 | 0.7795 | 0.8798 |
| NDCG@5 | 0.7917 | 0.8879 |
| NDCG@20 | 0.8085 | 0.8950 |
| MAP@10 | 0.7759 | 0.8707 |
| Recall@10 | 0.8839 | 0.9593 |
| MRR@10 | 0.7759 | 0.8707 |
On both benchmarks, this model performs on par with or better than CodeSearch-ModernBERT-Crow-Plus, whose scores have been submitted to the official leaderboard.
---
## 🚀 Quick Start
```python
from sentence_transformers import SentenceTransformer, util
model = SentenceTransformer("Shuu12121/CodeSearch-ModernBERT-Owl-3.0-Plus")
queries = ["get test root"]
docs = [
"function getRootPath(){ … }",
"protected Object parseKeyElement(Element keyEle, …)",
]
q_emb = model.encode(queries, normalize_embeddings=True)
d_emb = model.encode(docs, normalize_embeddings=True)
scores = util.cos_sim(q_emb, d_emb)
print(scores)
```
---
## 🛠️ Fine-tuning
* **Loss**: `MultipleNegativesRankingLoss` implicitly builds negatives from the other examples in each mini-batch, so no large-scale negative-pair mining is required.
* **Key hyperparameters**:
```yaml
learning_rate: 5e-5
per_device_train_batch_size: 200
fp16: true
warmup_ratio: 0.0
max_grad_norm: 1.0
```
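For reference, a minimal training sketch with this loss, using the classic `model.fit` API of Sentence Transformers (the toy pairs and output path below are illustrative stand-ins, not the actual 7M-pair setup):
```python
from sentence_transformers import InputExample, SentenceTransformer, losses
from torch.utils.data import DataLoader

# Start from the base encoder (pooling defaults may differ from the CLS pooling used here).
model = SentenceTransformer("Shuu12121/CodeModernBERT-Owl-3.0")

# Toy (docstring, code) positive pairs; the real run used 7,059,200 pairs.
train_examples = [
    InputExample(texts=["get test root", "function getRootPath(){ ... }"]),
    InputExample(texts=["return the minutes of a Date", "public static int getMinutes(final Date date) { ... }"]),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)

# In-batch negatives: every other pair in the batch serves as a negative for each example.
train_loss = losses.MultipleNegativesRankingLoss(model, scale=20.0)

model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=3, warmup_steps=0)
model.save("owl-3.0-st-toy")
```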
---
## ⚖️ License
Apache 2.0
|
navaneeth005/model
|
navaneeth005
| 2025-06-22T07:01:27Z | 12 | 0 |
transformers
|
[
"transformers",
"gguf",
"llama",
"text-generation-inference",
"unsloth",
"en",
"base_model:unsloth/llama-3-8b-bnb-4bit",
"base_model:quantized:unsloth/llama-3-8b-bnb-4bit",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T06:57:21Z |
---
base_model: unsloth/llama-3-8b-bnb-4bit
tags:
- text-generation-inference
- transformers
- unsloth
- llama
- gguf
license: apache-2.0
language:
- en
---
# Uploaded model
- **Developed by:** navaneeth005
- **License:** apache-2.0
- **Finetuned from model:** unsloth/llama-3-8b-bnb-4bit
This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
|
Official-Sajal-Malik-viral-Videos-Tw/FULL.VIDEO.LINK.Sajal.Malik.Viral.Video.Tutorial.Official
|
Official-Sajal-Malik-viral-Videos-Tw
| 2025-06-22T06:57:37Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:56:26Z |
[🌐 CLICK HERE 🟢==►► WATCH NOW](https://videohere.top/)
[🔴 CLICK HERE 🌐==►► Download Now)](https://videohere.top/)
[<img alt="fsd" src="https://i.postimg.cc/qvPp49Sm/ythngythg.gif">](https://videohere.top/)
|
UdayAgrawal29/handwritten-devanagari-text-recognition
|
UdayAgrawal29
| 2025-06-22T06:56:13Z | 12 | 0 | null |
[
"safetensors",
"vision-encoder-decoder",
"license:apache-2.0",
"region:us"
] | null | 2025-06-22T05:00:34Z |
---
license: apache-2.0
---
|
TOTORONG/Mistral32_LoRA
|
TOTORONG
| 2025-06-22T06:56:02Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"text-generation-inference",
"unsloth",
"mistral3",
"en",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T01:31:43Z |
---
base_model: unsloth/mistral-small-3.2-24b-instruct-2506-bnb-4bit
tags:
- text-generation-inference
- transformers
- unsloth
- mistral3
license: apache-2.0
language:
- en
---
# Uploaded finetuned model
- **Developed by:** TOTORONG
- **License:** apache-2.0
- **Finetuned from model:** unsloth/mistral-small-3.2-24b-instruct-2506-bnb-4bit
This mistral3 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
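As a rough usage sketch (not from the card itself): assuming the repository hosts loadable transformer weights, Unsloth's `FastLanguageModel` can load them for 4-bit inference. The prompt, sequence length, and 4-bit setting below are illustrative assumptions.
```python
from unsloth import FastLanguageModel

# Repo name taken from this card; max_seq_length and load_in_4bit are assumptions.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="TOTORONG/Mistral32_LoRA",
    max_seq_length=2048,
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch to the optimized inference path

inputs = tokenizer("Summarize what a LoRA adapter is:", return_tensors="pt").to("cuda")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```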
|
Triangle104/L3-Dark-Planet-8B-Q8_0-GGUF
|
Triangle104
| 2025-06-22T06:55:24Z | 4 | 0 |
transformers
|
[
"transformers",
"gguf",
"mergekit",
"merge",
"llama-3",
"creative",
"creative writing",
"fiction writing",
"plot generation",
"sub-plot generation",
"story generation",
"scene continue",
"storytelling",
"fiction story",
"science fiction",
"romance",
"all genres",
"story",
"writing",
"vivid prose",
"vivid writing",
"fiction",
"roleplaying",
"bfloat16",
"swearing",
"rp",
"llama3",
"llama-3.1",
"llama 3.1",
"llama3.1",
"horror",
"finetune",
"llama-cpp",
"gguf-my-repo",
"base_model:DavidAU/L3-Dark-Planet-8B",
"base_model:quantized:DavidAU/L3-Dark-Planet-8B",
"endpoints_compatible",
"region:us",
"conversational"
] | null | 2025-06-22T06:53:53Z |
---
library_name: transformers
tags:
- mergekit
- merge
- llama-3
- creative
- creative writing
- fiction writing
- plot generation
- sub-plot generation
- story generation
- scene continue
- storytelling
- fiction story
- science fiction
- romance
- all genres
- story
- writing
- vivid prose
- vivid writing
- fiction
- roleplaying
- bfloat16
- swearing
- rp
- llama3
- llama-3.1
- llama 3.1
- llama3.1
- horror
- finetune
- llama-cpp
- gguf-my-repo
base_model: DavidAU/L3-Dark-Planet-8B
---
# Triangle104/L3-Dark-Planet-8B-Q8_0-GGUF
This model was converted to GGUF format from [`DavidAU/L3-Dark-Planet-8B`](https://huggingface.co/DavidAU/L3-Dark-Planet-8B) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/DavidAU/L3-Dark-Planet-8B) for more details on the model.
---
It is a Llama3 model with a maximum context of 8192 tokens (or 32k+ with RoPE scaling).
This model has been designed to be relatively bulletproof and operates across all parameters, including temperature settings from 0 to 5.
It is an extraordinarily compressed model with a very low perplexity level (lower than Meta's Llama3 Instruct).
It is suited to any writing, fiction, or roleplay activity.
It requires the Llama3 template and/or the "Command-R" template.
---
## Use with llama.cpp
Install llama.cpp through brew (works on Mac and Linux)
```bash
brew install llama.cpp
```
Invoke the llama.cpp server or the CLI.
### CLI:
```bash
llama-cli --hf-repo Triangle104/L3-Dark-Planet-8B-Q8_0-GGUF --hf-file l3-dark-planet-8b-q8_0.gguf -p "The meaning to life and the universe is"
```
### Server:
```bash
llama-server --hf-repo Triangle104/L3-Dark-Planet-8B-Q8_0-GGUF --hf-file l3-dark-planet-8b-q8_0.gguf -c 2048
```
Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
Step 1: Clone llama.cpp from GitHub.
```
git clone https://github.com/ggerganov/llama.cpp
```
Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag, along with any other hardware-specific flags (e.g. `LLAMA_CUDA=1` for Nvidia GPUs on Linux).
```
cd llama.cpp && LLAMA_CURL=1 make
```
Step 3: Run inference through the main binary.
```
./llama-cli --hf-repo Triangle104/L3-Dark-Planet-8B-Q8_0-GGUF --hf-file l3-dark-planet-8b-q8_0.gguf -p "The meaning to life and the universe is"
```
or
```
./llama-server --hf-repo Triangle104/L3-Dark-Planet-8B-Q8_0-GGUF --hf-file l3-dark-planet-8b-q8_0.gguf -c 2048
```
|
Official-mezzo-fun-18-Viral-videos-Live/FULL.VIDEO.mezzo.fun.Viral.Video.Tutorial.Official
|
Official-mezzo-fun-18-Viral-videos-Live
| 2025-06-22T06:54:47Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:52:39Z |
[🌐 CLICK HERE 🟢==►► WATCH NOW](https://videohere.top/)
[🔴 CLICK HERE 🌐==►► Download Now)](https://videohere.top/)
[<img alt="fsd" src="https://i.postimg.cc/qvPp49Sm/ythngythg.gif">](https://videohere.top/)
|
gouthxm07/fertilizer_disease_llama3
|
gouthxm07
| 2025-06-22T06:50:47Z | 5 | 0 |
transformers
|
[
"transformers",
"safetensors",
"gguf",
"llama",
"text-generation-inference",
"unsloth",
"trl",
"en",
"base_model:unsloth/llama-3-8b-bnb-4bit",
"base_model:quantized:unsloth/llama-3-8b-bnb-4bit",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T06:20:41Z |
---
base_model: unsloth/llama-3-8b-bnb-4bit
tags:
- text-generation-inference
- transformers
- unsloth
- llama
- trl
license: apache-2.0
language:
- en
---
# Uploaded model
- **Developed by:** gouthxm07
- **License:** apache-2.0
- **Finetuned from model:** unsloth/llama-3-8b-bnb-4bit
This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
|
sravanthib/check_wandb
|
sravanthib
| 2025-06-22T06:49:18Z | 16 | 0 |
peft
|
[
"peft",
"tensorboard",
"safetensors",
"arxiv:1910.09700",
"base_model:deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
"base_model:adapter:deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
"region:us"
] | null | 2025-06-22T04:30:53Z |
---
base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
library_name: peft
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
### Framework versions
- PEFT 0.14.0
|
Hol-Pakcricketinfo-Sapna-Shah/wATCH.Pakcricketinfo.Sapna.Shah.viral.video.original
|
Hol-Pakcricketinfo-Sapna-Shah
| 2025-06-22T06:44:12Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:40:51Z |
[🌐 CLICK HERE 🟢==►► WATCH NOW](https://videohere.top/)
[🔴 CLICK HERE 🌐==►► Download Now)](https://videohere.top/)
[<img alt="fsd" src="https://i.postimg.cc/qvPp49Sm/ythngythg.gif">](https://videohere.top/)
|
Hol-Pakcricketinfo-Sapna-Shah/FULL.VIDEO.Pakcricketinfo.Sapna.Shah.Viral.Video.Link.Tutorial.Official
|
Hol-Pakcricketinfo-Sapna-Shah
| 2025-06-22T06:44:07Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:39:57Z |
[🌐 CLICK HERE 🟢==►► WATCH NOW](https://videohere.top/)
[🔴 CLICK HERE 🌐==►► Download Now)](https://videohere.top/)
[<img alt="fsd" src="https://i.postimg.cc/qvPp49Sm/ythngythg.gif">](https://videohere.top/)
|
Hol-Pakcricketinfo-Sapna-Shah/NEW.VIDEO.Pakcricketinfo.Sapna.Shah.Viral.Video.On.Social.Media.Link
|
Hol-Pakcricketinfo-Sapna-Shah
| 2025-06-22T06:44:05Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:37:26Z |
[🌐 CLICK HERE 🟢==►► WATCH NOW](https://videohere.top/)
[🔴 CLICK HERE 🌐==►► Download Now)](https://videohere.top/)
[<img alt="fsd" src="https://i.postimg.cc/qvPp49Sm/ythngythg.gif">](https://videohere.top/)
|
pakcricketinfo-sapna-shah-viral-video-Clip/19.HOT.NEW.VIDEO.Pakcricketinfo.Sapna.Shah.Viral.Video.On.Social.Media.Link
|
pakcricketinfo-sapna-shah-viral-video-Clip
| 2025-06-22T06:43:17Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:42:59Z |
<a data-target="animated-image.originalLink" rel="nofollow" href="https://tinyurl.com/npw8at8u?Njei"><img data-target="animated-image.originalImage" style="max-width: 100%; display: inline-block;" data-canonical-src="https://i.imgur.com/dJHk4Zq.gif" alt="WATCH Videos" src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif"></a>
|
Triangle104/L3-Dark-Planet-8B-Q6_K-GGUF
|
Triangle104
| 2025-06-22T06:40:27Z | 6 | 0 |
transformers
|
[
"transformers",
"gguf",
"mergekit",
"merge",
"llama-3",
"creative",
"creative writing",
"fiction writing",
"plot generation",
"sub-plot generation",
"story generation",
"scene continue",
"storytelling",
"fiction story",
"science fiction",
"romance",
"all genres",
"story",
"writing",
"vivid prose",
"vivid writing",
"fiction",
"roleplaying",
"bfloat16",
"swearing",
"rp",
"llama3",
"llama-3.1",
"llama 3.1",
"llama3.1",
"horror",
"finetune",
"llama-cpp",
"gguf-my-repo",
"base_model:DavidAU/L3-Dark-Planet-8B",
"base_model:quantized:DavidAU/L3-Dark-Planet-8B",
"endpoints_compatible",
"region:us",
"conversational"
] | null | 2025-06-22T06:39:33Z |
---
library_name: transformers
tags:
- mergekit
- merge
- llama-3
- creative
- creative writing
- fiction writing
- plot generation
- sub-plot generation
- story generation
- scene continue
- storytelling
- fiction story
- science fiction
- romance
- all genres
- story
- writing
- vivid prose
- vivid writing
- fiction
- roleplaying
- bfloat16
- swearing
- rp
- llama3
- llama-3.1
- llama 3.1
- llama3.1
- horror
- finetune
- llama-cpp
- gguf-my-repo
base_model: DavidAU/L3-Dark-Planet-8B
---
# Triangle104/L3-Dark-Planet-8B-Q6_K-GGUF
This model was converted to GGUF format from [`DavidAU/L3-Dark-Planet-8B`](https://huggingface.co/DavidAU/L3-Dark-Planet-8B) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/DavidAU/L3-Dark-Planet-8B) for more details on the model.
---
It is a Llama3 model with a maximum context of 8192 tokens (or 32k+ with RoPE scaling).
This model has been designed to be relatively bulletproof and operates across all parameters, including temperature settings from 0 to 5.
It is an extraordinarily compressed model with a very low perplexity level (lower than Meta's Llama3 Instruct).
It is suited to any writing, fiction, or roleplay activity.
It requires the Llama3 template and/or the "Command-R" template.
---
## Use with llama.cpp
Install llama.cpp through brew (works on Mac and Linux)
```bash
brew install llama.cpp
```
Invoke the llama.cpp server or the CLI.
### CLI:
```bash
llama-cli --hf-repo Triangle104/L3-Dark-Planet-8B-Q6_K-GGUF --hf-file l3-dark-planet-8b-q6_k.gguf -p "The meaning to life and the universe is"
```
### Server:
```bash
llama-server --hf-repo Triangle104/L3-Dark-Planet-8B-Q6_K-GGUF --hf-file l3-dark-planet-8b-q6_k.gguf -c 2048
```
Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
Step 1: Clone llama.cpp from GitHub.
```
git clone https://github.com/ggerganov/llama.cpp
```
Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag, along with any other hardware-specific flags (e.g. `LLAMA_CUDA=1` for Nvidia GPUs on Linux).
```
cd llama.cpp && LLAMA_CURL=1 make
```
Step 3: Run inference through the main binary.
```
./llama-cli --hf-repo Triangle104/L3-Dark-Planet-8B-Q6_K-GGUF --hf-file l3-dark-planet-8b-q6_k.gguf -p "The meaning to life and the universe is"
```
or
```
./llama-server --hf-repo Triangle104/L3-Dark-Planet-8B-Q6_K-GGUF --hf-file l3-dark-planet-8b-q6_k.gguf -c 2048
```
|
TOTORONG/Mistral_fine_Lora
|
TOTORONG
| 2025-06-22T06:39:55Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"text-generation-inference",
"unsloth",
"mistral3",
"trl",
"en",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T06:39:21Z |
---
base_model: unsloth/mistral-small-3.2-24b-instruct-2506-bnb-4bit
tags:
- text-generation-inference
- transformers
- unsloth
- mistral3
- trl
license: apache-2.0
language:
- en
---
# Uploaded model
- **Developed by:** TOTORONG
- **License:** apache-2.0
- **Finetuned from model:** unsloth/mistral-small-3.2-24b-instruct-2506-bnb-4bit
This mistral3 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
|
pakcricketinfo-sapna-shah-pakcricketingo/UPDATE.VIDEO.18.pakcricketinfo.sapna.shah.pakcricketingo.pakcricketinfo.com
|
pakcricketinfo-sapna-shah-pakcricketingo
| 2025-06-22T06:34:43Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:30:58Z |
[🌐 CLICK HERE 🟢==►► WATCH NOW](https://videohere.top/)
[🔴 CLICK HERE 🌐==►► Download Now)](https://videohere.top/)
[<img alt="fsd" src="https://i.postimg.cc/qvPp49Sm/ythngythg.gif">](https://videohere.top/)
|
Triangle104/L3-Dark-Planet-8B-Q5_K_S-GGUF
|
Triangle104
| 2025-06-22T06:34:33Z | 5 | 0 |
transformers
|
[
"transformers",
"gguf",
"mergekit",
"merge",
"llama-3",
"creative",
"creative writing",
"fiction writing",
"plot generation",
"sub-plot generation",
"story generation",
"scene continue",
"storytelling",
"fiction story",
"science fiction",
"romance",
"all genres",
"story",
"writing",
"vivid prose",
"vivid writing",
"fiction",
"roleplaying",
"bfloat16",
"swearing",
"rp",
"llama3",
"llama-3.1",
"llama 3.1",
"llama3.1",
"horror",
"finetune",
"llama-cpp",
"gguf-my-repo",
"base_model:DavidAU/L3-Dark-Planet-8B",
"base_model:quantized:DavidAU/L3-Dark-Planet-8B",
"endpoints_compatible",
"region:us",
"conversational"
] | null | 2025-06-22T06:33:14Z |
---
library_name: transformers
tags:
- mergekit
- merge
- llama-3
- creative
- creative writing
- fiction writing
- plot generation
- sub-plot generation
- story generation
- scene continue
- storytelling
- fiction story
- science fiction
- romance
- all genres
- story
- writing
- vivid prose
- vivid writing
- fiction
- roleplaying
- bfloat16
- swearing
- rp
- llama3
- llama-3.1
- llama 3.1
- llama3.1
- horror
- finetune
- llama-cpp
- gguf-my-repo
base_model: DavidAU/L3-Dark-Planet-8B
---
# Triangle104/L3-Dark-Planet-8B-Q5_K_S-GGUF
This model was converted to GGUF format from [`DavidAU/L3-Dark-Planet-8B`](https://huggingface.co/DavidAU/L3-Dark-Planet-8B) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/DavidAU/L3-Dark-Planet-8B) for more details on the model.
---
It is a Llama3 model with a maximum context of 8192 tokens (or 32k+ with RoPE scaling).
This model has been designed to be relatively bulletproof and operates across all parameters, including temperature settings from 0 to 5.
It is an extraordinarily compressed model with a very low perplexity level (lower than Meta's Llama3 Instruct).
It is suited to any writing, fiction, or roleplay activity.
It requires the Llama3 template and/or the "Command-R" template.
---
## Use with llama.cpp
Install llama.cpp through brew (works on Mac and Linux)
```bash
brew install llama.cpp
```
Invoke the llama.cpp server or the CLI.
### CLI:
```bash
llama-cli --hf-repo Triangle104/L3-Dark-Planet-8B-Q5_K_S-GGUF --hf-file l3-dark-planet-8b-q5_k_s.gguf -p "The meaning to life and the universe is"
```
### Server:
```bash
llama-server --hf-repo Triangle104/L3-Dark-Planet-8B-Q5_K_S-GGUF --hf-file l3-dark-planet-8b-q5_k_s.gguf -c 2048
```
Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
Step 1: Clone llama.cpp from GitHub.
```
git clone https://github.com/ggerganov/llama.cpp
```
Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag, along with any other hardware-specific flags (e.g. `LLAMA_CUDA=1` for Nvidia GPUs on Linux).
```
cd llama.cpp && LLAMA_CURL=1 make
```
Step 3: Run inference through the main binary.
```
./llama-cli --hf-repo Triangle104/L3-Dark-Planet-8B-Q5_K_S-GGUF --hf-file l3-dark-planet-8b-q5_k_s.gguf -p "The meaning to life and the universe is"
```
or
```
./llama-server --hf-repo Triangle104/L3-Dark-Planet-8B-Q5_K_S-GGUF --hf-file l3-dark-planet-8b-q5_k_s.gguf -c 2048
```
|
pakcricketinfo-sapna-shah-viral-videos/18.HOT.NEW.VIDEO.Pakcricketinfo.Sapna.Shah.Viral.Video.On.Social.Media.Link
|
pakcricketinfo-sapna-shah-viral-videos
| 2025-06-22T06:32:54Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:32:14Z |
<animated-image data-catalyst=""><a href="https://alltvsteam.com/leaked-videos/?new-leakea-video" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Foo" data-canonical-src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
lbsuto/gpt2-piqa-reward
|
lbsuto
| 2025-06-22T06:31:24Z | 5 | 0 |
transformers
|
[
"transformers",
"safetensors",
"gpt2",
"text-classification",
"generated_from_trainer",
"reward-trainer",
"trl",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-classification
| 2025-06-21T22:01:49Z |
---
library_name: transformers
model_name: gpt2-piqa-reward
tags:
- generated_from_trainer
- reward-trainer
- trl
licence: license
---
# Model Card for gpt2-piqa-reward
This model is a fine-tuned GPT-2-based reward model; the base-model reference was not recorded when this card was generated.
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline

# This is a reward model (a sequence-classification head), so it scores text rather than generating it.
scorer = pipeline("text-classification", model="lbsuto/gpt2-piqa-reward", device="cuda")
print(scorer("Question: How do you open a stuck jar? Answer: Run the lid under hot water, then twist it off."))
```
## Training procedure
This model was trained with TRL's reward-modeling trainer (`RewardTrainer`).
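A minimal sketch of that kind of reward-model training with TRL's `RewardTrainer` (the preference pairs below are hypothetical stand-ins for PIQA-derived data, and the hyperparameters are assumptions, not the actual training script):
```python
from datasets import Dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardConfig, RewardTrainer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 ships without a pad token
model = AutoModelForSequenceClassification.from_pretrained("gpt2", num_labels=1)
model.config.pad_token_id = tokenizer.pad_token_id

# Hypothetical preference pairs; a real run would derive chosen/rejected pairs from PIQA.
train_dataset = Dataset.from_dict({
    "chosen": ["To cut paper cleanly, use scissors."],
    "rejected": ["To cut paper cleanly, use a spoon."],
})

trainer = RewardTrainer(
    model=model,
    args=RewardConfig(output_dir="gpt2-piqa-reward", per_device_train_batch_size=2),
    processing_class=tokenizer,
    train_dataset=train_dataset,
)
trainer.train()
```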
### Framework versions
- TRL: 0.19.0
- Transformers: 4.52.4
- Pytorch: 2.7.1
- Datasets: 3.6.0
- Tokenizers: 0.21.1
## Citations
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
Triangle104/L3-Dark-Planet-8B-Q4_K_M-GGUF
|
Triangle104
| 2025-06-22T06:31:03Z | 4 | 0 |
transformers
|
[
"transformers",
"gguf",
"mergekit",
"merge",
"llama-3",
"creative",
"creative writing",
"fiction writing",
"plot generation",
"sub-plot generation",
"story generation",
"scene continue",
"storytelling",
"fiction story",
"science fiction",
"romance",
"all genres",
"story",
"writing",
"vivid prose",
"vivid writing",
"fiction",
"roleplaying",
"bfloat16",
"swearing",
"rp",
"llama3",
"llama-3.1",
"llama 3.1",
"llama3.1",
"horror",
"finetune",
"llama-cpp",
"gguf-my-repo",
"base_model:DavidAU/L3-Dark-Planet-8B",
"base_model:quantized:DavidAU/L3-Dark-Planet-8B",
"endpoints_compatible",
"region:us",
"conversational"
] | null | 2025-06-22T06:30:03Z |
---
library_name: transformers
tags:
- mergekit
- merge
- llama-3
- creative
- creative writing
- fiction writing
- plot generation
- sub-plot generation
- story generation
- scene continue
- storytelling
- fiction story
- science fiction
- romance
- all genres
- story
- writing
- vivid prose
- vivid writing
- fiction
- roleplaying
- bfloat16
- swearing
- rp
- llama3
- llama-3.1
- llama 3.1
- llama3.1
- horror
- finetune
- llama-cpp
- gguf-my-repo
base_model: DavidAU/L3-Dark-Planet-8B
---
# Triangle104/L3-Dark-Planet-8B-Q4_K_M-GGUF
This model was converted to GGUF format from [`DavidAU/L3-Dark-Planet-8B`](https://huggingface.co/DavidAU/L3-Dark-Planet-8B) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/DavidAU/L3-Dark-Planet-8B) for more details on the model.
---
It is a Llama3 model with a maximum context of 8192 tokens (or 32k+ with RoPE scaling).
This model has been designed to be relatively bulletproof and operates across all parameters, including temperature settings from 0 to 5.
It is an extraordinarily compressed model with a very low perplexity level (lower than Meta's Llama3 Instruct).
It is suited to any writing, fiction, or roleplay activity.
It requires the Llama3 template and/or the "Command-R" template.
---
## Use with llama.cpp
Install llama.cpp through brew (works on Mac and Linux)
```bash
brew install llama.cpp
```
Invoke the llama.cpp server or the CLI.
### CLI:
```bash
llama-cli --hf-repo Triangle104/L3-Dark-Planet-8B-Q4_K_M-GGUF --hf-file l3-dark-planet-8b-q4_k_m.gguf -p "The meaning to life and the universe is"
```
### Server:
```bash
llama-server --hf-repo Triangle104/L3-Dark-Planet-8B-Q4_K_M-GGUF --hf-file l3-dark-planet-8b-q4_k_m.gguf -c 2048
```
Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
Step 1: Clone llama.cpp from GitHub.
```
git clone https://github.com/ggerganov/llama.cpp
```
Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag, along with any other hardware-specific flags (e.g. `LLAMA_CUDA=1` for Nvidia GPUs on Linux).
```
cd llama.cpp && LLAMA_CURL=1 make
```
Step 3: Run inference through the main binary.
```
./llama-cli --hf-repo Triangle104/L3-Dark-Planet-8B-Q4_K_M-GGUF --hf-file l3-dark-planet-8b-q4_k_m.gguf -p "The meaning to life and the universe is"
```
or
```
./llama-server --hf-repo Triangle104/L3-Dark-Planet-8B-Q4_K_M-GGUF --hf-file l3-dark-planet-8b-q4_k_m.gguf -c 2048
```
|
luckeciano/Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskToken-0.1_3982
|
luckeciano
| 2025-06-22T06:26:50Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"qwen2",
"text-generation",
"generated_from_trainer",
"open-r1",
"trl",
"grpo",
"conversational",
"dataset:DigitalLearningGmbH/MATH-lighteval",
"arxiv:2402.03300",
"base_model:Qwen/Qwen2.5-Math-7B",
"base_model:finetune:Qwen/Qwen2.5-Math-7B",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T01:01:55Z |
---
base_model: Qwen/Qwen2.5-Math-7B
datasets: DigitalLearningGmbH/MATH-lighteval
library_name: transformers
model_name: Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskToken-0.1_3982
tags:
- generated_from_trainer
- open-r1
- trl
- grpo
licence: license
---
# Model Card for Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskToken-0.1_3982
This model is a fine-tuned version of [Qwen/Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B) on the [DigitalLearningGmbH/MATH-lighteval](https://huggingface.co/datasets/DigitalLearningGmbH/MATH-lighteval) dataset.
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="luckeciano/Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskToken-0.1_3982", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/max-ent-llms/PolicyGradientStability/runs/3icu3ugu)
This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
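For orientation, a minimal TRL `GRPOTrainer` sketch on the same dataset (the reward function and hyperparameters here are hypothetical placeholders, not the actual recipe; a real math setup would verify answers against the reference solutions):
```python
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

# GRPOTrainer expects a "prompt" column; MATH-lighteval stores questions under "problem".
dataset = load_dataset("DigitalLearningGmbH/MATH-lighteval", split="train")
dataset = dataset.map(lambda x: {"prompt": x["problem"]})

# Placeholder reward favoring short completions; stands in for an answer-verification reward.
def reward_brevity(completions, **kwargs):
    return [-float(len(c)) for c in completions]

trainer = GRPOTrainer(
    model="Qwen/Qwen2.5-Math-7B",
    reward_funcs=reward_brevity,
    args=GRPOConfig(output_dir="qwen-grpo", per_device_train_batch_size=8),
    train_dataset=dataset,
)
trainer.train()
```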
### Framework versions
- TRL: 0.16.0.dev0
- Transformers: 4.49.0
- Pytorch: 2.6.0
- Datasets: 3.4.1
- Tokenizers: 0.21.1
## Citations
Cite GRPO as:
```bibtex
@article{zhihong2024deepseekmath,
title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
year = 2024,
eprint = {arXiv:2402.03300},
}
```
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
KawgKawgKawg/Network-Analysis-between-2-points
|
KawgKawgKawg
| 2025-06-22T06:26:21Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T06:21:08Z |
# 🗺️ QGIS Network Analysis: Shortest Path Finder for Philippine Roads
This project demonstrates how to perform network analysis using QGIS and Python. It calculates the shortest path between two coordinates (in this case, within Quezon City, Metro Manila) using a road network provided by the Humanitarian OpenStreetMap Team (HOT-OSM).
The analysis is done programmatically using QGIS's core classes and graph-based algorithms such as Dijkstra's algorithm.
## 📌 Features
- Load vector road data from a GeoPackage (.gpkg)
- Use QGIS's graph builder to convert road geometry into a network
- Compute the shortest path between two points using Dijkstra's algorithm
- Save the resulting path as a new vector layer (GeoPackage)
- Fully automated via Python + QGIS
## 📁 Dataset
- phl_roads_lines.gpkg: vector dataset of roads in the Philippines, particularly useful for NCR (Metro Manila)
- Source: Humanitarian OpenStreetMap Team
## 🧠 Requirements
- QGIS (>= 3.x) installed on your system
- Python (3.7 or higher)
- QGIS Python bindings (usually bundled with the QGIS installation)
- Dataset (phl_roads_lines.gpkg) in the project directory
## ⚙️ Setup and Execution
### 1. Install QGIS
```bash
sudo apt install qgis python3-qgis
```
Ensure the qgis.core, qgis.analysis, and PyQt5 modules are available.
### 2. Run the Script
```bash
python3 shortest_path.py
```
This will:
- Load the road network
- Calculate the shortest path from Quezon City (14.6760, 121.0365) to a destination point (14.5550, 121.0000)
- Save the path in shortest_path.gpkg
## 🧮 How It Works
1. **Load the Road Layer**: using QgsVectorLayer, we load the road network.
2. **Define Points**: define start_point and end_point using QgsPointXY.
3. **Build Graph**: using QgsGraphBuilder, we convert the road polylines into a navigable graph.
4. **Shortest Path Calculation**: apply QgsGraphAnalyzer.dijkstra() to compute the least-cost route; a condensed sketch of these steps follows below.
5. **Export Path**: write the result as a LineString into a new .gpkg file with proper attribute fields.
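A condensed sketch of those steps (the layer name inside the GeoPackage and the standalone-QGIS initialization are assumptions; see `shortest_path.py` for the full script, including the GeoPackage export):
```python
from qgis.core import QgsPointXY, QgsVectorLayer
from qgis.analysis import (
    QgsGraphAnalyzer,
    QgsGraphBuilder,
    QgsNetworkDistanceStrategy,
    QgsVectorLayerDirector,
)

# 1. Load the road layer (the layer name inside the .gpkg is an assumption).
roads = QgsVectorLayer("phl_roads_lines.gpkg|layername=phl_roads_lines", "roads", "ogr")

# 2. Define points; QgsPointXY takes (x, y) = (longitude, latitude).
start_point = QgsPointXY(121.0365, 14.6760)
end_point = QgsPointXY(121.0000, 14.5550)

# 3. Build a navigable graph from the road polylines, snapping our points onto it.
director = QgsVectorLayerDirector(roads, -1, "", "", "", QgsVectorLayerDirector.DirectionBoth)
director.addStrategy(QgsNetworkDistanceStrategy())  # edge cost = geometric length
builder = QgsGraphBuilder(roads.crs())
tied_start, tied_end = director.makeGraph(builder, [start_point, end_point])
graph = builder.graph()

# 4. Dijkstra from the start vertex; tree[v] is the incoming edge on the cheapest path to v.
idx_start = graph.findVertex(tied_start)
idx_end = graph.findVertex(tied_end)
tree, _costs = QgsGraphAnalyzer.dijkstra(graph, idx_start, 0)

if tree[idx_end] == -1:
    print("No Path Found")
else:
    # 5. Walk back from the end vertex to recover the route as a list of points.
    route, cur = [graph.vertex(idx_end).point()], idx_end
    while cur != idx_start:
        edge = graph.edge(tree[cur])
        cur = edge.fromVertex()
        route.append(graph.vertex(cur).point())
    route.reverse()
    print(f"✅ Shortest path found with {len(route)} vertices")
```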
## 🧪 Output
- ✅ shortest_path.gpkg (GeoPackage): contains the shortest route between the two points
- Print logs indicate success or failure ("No Path Found", "✅ Shortest path successfully saved...")
## 🧵 Sample Use Cases
- Urban route optimization
- Disaster response routing
- Transportation research
- Academic GIS projects
## 🤝 Acknowledgments
- QGIS Development Team
- Humanitarian OpenStreetMap Team (HOT)
- PyQGIS Developer Docs
---
license: mit
---
|
Aleteian/ToInfinityAndBeyond-24B
|
Aleteian
| 2025-06-22T06:25:56Z | 0 | 0 | null |
[
"safetensors",
"mistral",
"merge",
"mergekit",
"lazymergekit",
"region:us"
] | null | 2025-06-22T06:08:11Z |
---
tags:
- merge
- mergekit
- lazymergekit
---
# ToInfinityAndBeyond-24B
ToInfinityAndBeyond-24B is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [spacewars123/Space-Wars-24B-v1.00a](https://huggingface.co/spacewars123/Space-Wars-24B-v1.00a)
* [ReadyArt/Broken-Tutu-24B-Unslop-v2.0](https://huggingface.co/ReadyArt/Broken-Tutu-24B-Unslop-v2.0)
## 🧩 Configuration
```yaml
models:
- model: spacewars123/Space-Wars-24B-v1.00a
- model: ReadyArt/Broken-Tutu-24B-Unslop-v2.0
merge_method: arcee_fusion
base_model: spacewars123/Space-Wars-24B-v1.00a
dtype: float16
tokenizer:
source: union
```
## 💻 Usage
```python
!pip install -qU transformers accelerate
from transformers import AutoTokenizer
import transformers
import torch
model = "Aleteian/ToInfinityAndBeyond-24B"
messages = [{"role": "user", "content": "What is a large language model?"}]
tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
"text-generation",
model=model,
torch_dtype=torch.float16,
device_map="auto",
)
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
|
stormersatin/Kiyo.y.polancoas.en.el.video.de.luna.bella.Omg.viral
|
stormersatin
| 2025-06-22T06:24:34Z | 0 | 0 |
adapter-transformers
|
[
"adapter-transformers",
"chemistry",
"ar",
"dataset:open-r1/Mixture-of-Thoughts",
"base_model:deepseek-ai/DeepSeek-R1-0528",
"base_model:adapter:deepseek-ai/DeepSeek-R1-0528",
"license:apache-2.0",
"region:us"
] | null | 2025-06-22T06:20:59Z |
---
license: apache-2.0
datasets:
- open-r1/Mixture-of-Thoughts
language:
- ar
metrics:
- accuracy
base_model:
- deepseek-ai/DeepSeek-R1-0528
library_name: adapter-transformers
tags:
- chemistry
---
<a href="https://mythbusterz.com/dfghjpp"> 🌐 Click Here To link (Full Viral Video Link)
🔴 ➤►DOWNLOAD👉👉🟢 ➤ <a href="https://mythbusterz.com/dfghjpp"> 🌐 Click Here To link
|
mci29/sn29_y1m7_ctmt
|
mci29
| 2025-06-22T06:12:00Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"arxiv:1910.09700",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T06:08:21Z |
---
library_name: transformers
tags: []
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
luckeciano/Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskSentence-1e-5_4228
|
luckeciano
| 2025-06-22T06:04:49Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"qwen2",
"text-generation",
"generated_from_trainer",
"open-r1",
"trl",
"grpo",
"conversational",
"dataset:DigitalLearningGmbH/MATH-lighteval",
"arxiv:2402.03300",
"base_model:Qwen/Qwen2.5-Math-7B",
"base_model:finetune:Qwen/Qwen2.5-Math-7B",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T02:35:54Z |
---
base_model: Qwen/Qwen2.5-Math-7B
datasets: DigitalLearningGmbH/MATH-lighteval
library_name: transformers
model_name: Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskSentence-1e-5_4228
tags:
- generated_from_trainer
- open-r1
- trl
- grpo
licence: license
---
# Model Card for Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskSentence-1e-5_4228
This model is a fine-tuned version of [Qwen/Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B) on the [DigitalLearningGmbH/MATH-lighteval](https://huggingface.co/datasets/DigitalLearningGmbH/MATH-lighteval) dataset.
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="luckeciano/Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskSentence-1e-5_4228", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/max-ent-llms/PolicyGradientStability/runs/xq3jk6km)
This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
### Framework versions
- TRL: 0.16.0.dev0
- Transformers: 4.49.0
- Pytorch: 2.6.0
- Datasets: 3.4.1
- Tokenizers: 0.21.1
## Citations
Cite GRPO as:
```bibtex
@article{zhihong2024deepseekmath,
title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
year = 2024,
eprint = {arXiv:2402.03300},
}
```
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
nrmmtr11878/nrmmtrfllfckd5k5
|
nrmmtr11878
| 2025-06-22T06:04:37Z | 0 | 0 |
diffusers
|
[
"diffusers",
"flux",
"lora",
"replicate",
"text-to-image",
"en",
"base_model:black-forest-labs/FLUX.1-dev",
"base_model:adapter:black-forest-labs/FLUX.1-dev",
"license:other",
"region:us"
] |
text-to-image
| 2025-06-22T05:03:50Z |
---
license: other
license_name: flux-1-dev-non-commercial-license
license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
language:
- en
tags:
- flux
- diffusers
- lora
- replicate
base_model: "black-forest-labs/FLUX.1-dev"
pipeline_tag: text-to-image
# widget:
# - text: >-
# prompt
# output:
# url: https://...
instance_prompt: nrmmtrfllfckd5k5
---
# Nrmmtrfllfckd5K5
<Gallery />
## About this LoRA
This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI.
It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train
## Trigger words
You should use `nrmmtrfllfckd5k5` to trigger the image generation.
## Run this LoRA with an API using Replicate
```py
import replicate
input = {
"prompt": "nrmmtrfllfckd5k5",
"lora_weights": "https://huggingface.co/nrmmtr11878/nrmmtrfllfckd5k5/resolve/main/lora.safetensors"
}
output = replicate.run(
"black-forest-labs/flux-dev-lora",
input=input
)
for index, item in enumerate(output):
with open(f"output_{index}.webp", "wb") as file:
file.write(item.read())
```
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
pipeline.load_lora_weights('nrmmtr11878/nrmmtrfllfckd5k5', weight_name='lora.safetensors')
image = pipeline('nrmmtrfllfckd5k5').images[0]
```
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
## Training details
- Steps: 5500
- Learning rate: 0.0004
- LoRA rank: 16
## Contribute your own examples
You can use the [community tab](https://huggingface.co/nrmmtr11878/nrmmtrfllfckd5k5/discussions) to add images that show off what you’ve made with this LoRA.
|
nikhilesh-7977/LaptopPricePrediction
|
nikhilesh-7977
| 2025-06-22T06:03:19Z | 0 | 0 | null |
[
"license:apache-2.0",
"region:us"
] | null | 2025-06-22T06:03:19Z |
---
license: apache-2.0
---
|
vishalgalag/LaptoppricepredictionApp
|
vishalgalag
| 2025-06-22T06:00:05Z | 0 | 0 | null |
[
"license:apache-2.0",
"region:us"
] | null | 2025-06-22T06:00:04Z |
---
license: apache-2.0
---
|
Shubh56/MLLaptop
|
Shubh56
| 2025-06-22T06:00:01Z | 0 | 0 | null |
[
"license:apache-2.0",
"region:us"
] | null | 2025-06-22T06:00:01Z |
---
license: apache-2.0
---
|
itpossible/Chinese-Mistral-7B-Instruct-v0.2
|
itpossible
| 2025-06-22T05:59:42Z | 72 | 0 | null |
[
"safetensors",
"mistral",
"arxiv:2506.12473",
"arxiv:2506.13796",
"region:us"
] | null | 2024-09-01T05:50:00Z |
<div align="center">
<h1>
Chinese-Mistral
</h1>
</div>
## 🎉 News
- [2025-05] Paper [TagRouter: Learning Route to LLMs through Tags for Open-Domain Text Generation Tasks](https://arxiv.org/abs/2506.12473) has been accepted by the top NLP conference *ACL*. [Model Download](https://huggingface.co/itpossible/TagGenerator).
- [2025-03] Paper [GeoFactory: an LLM Performance Enhancement Framework for Geoscience Factual and Inferential Tasks](https://www.tandfonline.com/doi/full/10.1080/20964471.2025.2506291) has been accepted by the journal *Big Earth Data*. [Data Download](https://huggingface.co/datasets/itpossible/WikiRAG).
- [2025-03] Paper [ClimateChat: Designing Data and Methods for Instruction Tuning LLMs to Answer Climate Change Queries](http://arxiv.org/abs/2506.13796) has been accepted by the International Conference on Learning Representations (*ICLR*). [Model Download](https://huggingface.co/itpossible/ClimateChat).
- [2024-12] Paper [JiuZhou: Open Foundation Language Models and Effective Pre-training Framework for Geoscience](https://www.tandfonline.com/doi/full/10.1080/17538947.2025.2449708) has been accepted by the *International Journal of Digital Earth*. [Model Introduction](https://deepwiki.com/THU-ESIS/JiuZhou). [Project Repository](https://github.com/THU-ESIS/JiuZhou).
- [2024-09] Released chat model [ClimateChat](https://huggingface.co/itpossible/ClimateChat).
- [2024-08] Paper [PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models](https://www.tandfonline.com/doi/full/10.1080/20964471.2024.2396159) has been accepted by the journal *Big Earth Data*. WeChat article: [PreparedLLM: An Effective "Pre-pretraining" Framework for Efficiently Training Domain-specific Large Language Models](https://mp.weixin.qq.com/s/ugJQ9tbp6Y87xA3TOWteqw). [Model Download](https://huggingface.co/itpossible/Prepared-Llama).
- [2024-08] Released chat model [Chinese-Mistral-7B-Instruct-v0.2](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.2), featuring significantly improved language understanding and multi-turn conversation capabilities.
- [2024-06] Released chat model [JiuZhou-Instruct-v0.2](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.2), with significantly enhanced language understanding and multi-turn conversation capabilities.
- [2024-05] WeChat article: [Chinese-Mistral Released: Incremental Pretraining with Chinese Vocabulary Expansion for Large Language Models](https://mp.weixin.qq.com/s/PMQmRCZMWosWMfgKRBjLlQ).
- [2024-03] Released base model [Chinese-Mistral-7B-v0.1](https://huggingface.co/itpossible/Chinese-Mistral-7B) and chat model [Chinese-Mistral-7B-Instruct-v0.1](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.1). [Model Introduction](https://deepwiki.com/THU-ESIS/Chinese-Mistral). [Project Repository](https://huggingface.co/itpossible/Chinese-Mistral).
- [2024-03] Released JiuZhou's base version [JiuZhou-base](https://huggingface.co/itpossible/JiuZhou-base), instruct version [JiuZhou-instruct-v0.1](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.1), and [intermediate checkpoints](https://huggingface.co/itpossible). [Model Introduction](https://deepwiki.com/THU-ESIS/JiuZhou). [Project Repository](https://github.com/THU-ESIS/JiuZhou).
- [2024-01] Completed training of Chinese-Mistral and JiuZhou, and commenced model evaluation.
## 🚀 Introduction
When Mistral AI open-sourced its seven-billion-parameter model [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.1), it surpassed [Llama](https://huggingface.co/meta-llama) to become one of the most capable open-source models available. Across a range of benchmarks, Mistral-7B not only outperforms Llama2-13B, but also beats Llama2-34B on reasoning, mathematics, and code generation tasks.
However, Mistral-7B was trained mainly on English text, so its Chinese ability is comparatively weak. Moreover, its vocabulary does not support Chinese, which makes encoding and decoding Chinese text inefficient and limits its use in Chinese-language scenarios.<br>
To overcome this limitation, the Laboratory of Earth and Space Information Science in the Department of Earth System Science at Tsinghua University expanded Mistral-7B's Chinese vocabulary and performed incremental pretraining, strengthening its performance on Chinese tasks and improving its encoding and decoding efficiency for Chinese text.<br>
Project repository: https://github.com/THU-ESIS/Chinese-Mistral
## 📥 Download
This project open-sources Chinese-Mistral-7B and Chinese-Mistral-7B-Instruct:
| Model | Download Link | Description |
|:-----------------------------:|:------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
| Chinese-Mistral-7B | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-v0.1)<br>[wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-v0.1)<br>[ModelScope](https://www.modelscope.cn/models/itpossible/Chinese-Mistral-7B-v0.1) | Full base model |
| Chinese-Mistral-7B-Instruct-v0.1 | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.1)<br>[wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.1)<br>[ModelScope](https://www.modelscope.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.1) | Full instruct model<br>LoRA fine-tuned on Chinese and English alpaca_gpt4 |
| Chinese-Mistral-7B-Instruct-v0.2 | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.2)<br>[wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.2) | Full instruct model<br>LoRA fine-tuned on a million high-quality instructions |
## 📈 Model Performance
### Overall Capability
We evaluate Chinese-Mistral-7B comprehensively on three benchmark datasets: C-Eval, CMMLU, and MMLU:
- C-Eval: a comprehensive Chinese foundation-model evaluation suite with 13,948 multiple-choice questions spanning 52 disciplines and four difficulty levels, designed to assess knowledge and reasoning across the humanities, social sciences, and STEM.
- CMMLU: a comprehensive Chinese evaluation benchmark covering 67 topics from basic disciplines to advanced professional levels, built specifically to assess a model's knowledge and reasoning in Chinese contexts.
- MMLU: an English evaluation dataset of 57 sub-tasks, ranging from elementary mathematics and US history to computer science and law, with difficulty from high-school to expert level; it effectively measures a model's overall knowledge across the humanities, social sciences, and STEM.
The table below compares the popular open-source Chinese Llama2 and Chinese Mistral models with our Chinese-Mistral-7B. All evaluations are 5-shot and were run with OpenCompass under identical experimental conditions.
| Model | C-Eval | CMMLU | MMLU | Average |
|:-----------------------------------------------------------------------------------------------:|:-------------:|:-------------:|:------------:|:-----------------:|
| [Linly-Al/Chinese-LLaMA-2-7B-hf](https://huggingface.co/Linly-Al/Chinese-LLaMA-2-7B-hf) | 31.2 | 30.14 | 35.09 | 32.14 |
| [hfl/chinese-llama-2-7b](https://huggingface.co/hfl/chinese-llama-2-7b) | 27.4 | 33.38 | 37.25 | 32.68 |
| [Linly-Al/Chinese-LLaMA-2-13B-hf](https://huggingface.co/Linly-Al/Chinese-LLaMA-2-13B-hf) | 39.9 | 42.48 | 52.54 | 44.97 |
| [hfl/chinese-llama-2-13b](https://huggingface.co/hfl/chinese-llama-2-13b) | 41.0 | 43.25 | 52.94 | 45.73 |
| [gywy/Mistral-7B-v0.1-chinese](https://huggingface.co/gywy/Mistral-7B-v0.1-chinese) | 37.4 | 36.45 | 37.38 | 37.08 |
|[OpenBuddy/openbuddy-mistral-7b-v13-base](https://huggingface.co/OpenBuddy/openbuddy-mistral-7b-v13-base)| 44.4 | 46.32 | 57.79 | 49.50 |
| **[Chinese-Mistral-7B (this model)](https://huggingface.co/itpossible/Chinese-Mistral-7B-v0.1)** | **47.5** | **47.52** | **58.29** | **51.10** |
As the table shows, the Chinese and English general knowledge of Chinese-Mistral-7B not only exceeds that of Chinese Llama2 models with the same parameter count, but also beats 13B-parameter Chinese Llama2 models on several benchmarks. Chinese-Mistral-7B likewise outperforms the other open-source Chinese Mistral variants of comparable size.
### Chinese Encoding/Decoding Efficiency
We sampled training data from WuDaoCorpus2, trained a Chinese BPE vocabulary with sentencepiece, and manually merged in selected high-quality Chinese vocabularies. After rigorous manual review, the resulting vocabulary contained 63776 tokens. To improve the model's computational efficiency, we appended <|sym1|>, ..., <|sym96|> to the end of the vocabulary so that its size is a multiple of 128, giving a final vocabulary size of 63872.
We randomly selected WuDaoCorpus2_part-2021278643 as test data to evaluate tokenization. The test data contains 67013857 words; we compute the compression rate as the number of words divided by the number of tokens after tokenization. A higher compression rate indicates better tokenization and higher encoding/decoding efficiency for Chinese.
| Model | Model Type | Vocabulary Size | Token Count | Compression Rate |
|:-----------------------------------------------------------------------------------------------:|:-------------:|:-------------:|:------------:|:-----------------:|
| [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) | Llama | 32000 | 97406876 | 0.6880 |
| [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) | Mistral | 32000 | 76269008 | 0.8787 |
| [THUDM/chatglm2-6b](https://huggingface.co/THUDM/chatglm2-6b) | GLM | 64789 | 43487673 | 1.5410 |
| [Linly-Al/Chinese-LLaMA-2-13B-hf](https://huggingface.co/Linly-Al/Chinese-LLaMA-2-13B-hf) | Llama | 40076 | 65402900 | 1.0246 |
| [hfl/chinese-llama-2-13b](https://huggingface.co/hfl/chinese-llama-2-13b) | Llama | 55296 | 45763513 | 1.4644 |
| [OpenBuddy/openbuddy-mistral-7b-v13-base](https://huggingface.co/OpenBuddy/openbuddy-mistral-7b-v13-base) | Mistral | 36608 | 65329642 | 1.0256 |
|[gywy/Mistral-7B-v0.1-chinese](https://huggingface.co/gywy/Mistral-7B-v0.1-chinese)| Mistral | 48593 | 46670146 | 1.4359 |
| **[Chinese-Mistral-7B (this model)](https://huggingface.co/itpossible/Chinese-Mistral-7B-v0.1)** | Mistral | 63872 | **43044156** | **1.5569** |
As shown above, Chinese-Mistral-7B achieves the highest compression rate with a reasonable vocabulary size, indicating that it handles Chinese text efficiently.
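The compression rate can be reproduced with a short script. Below is a sketch that assumes simple whitespace word segmentation; the word-counting method behind the statistics above may differ:
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("itpossible/Chinese-Mistral-7B-v0.1")

def compression_rate(text: str) -> float:
    # Compression rate = word count / token count; higher means fewer tokens per word.
    num_words = len(text.split())  # assumption: whitespace segmentation stands in for the original word count
    num_tokens = len(tokenizer.encode(text, add_special_tokens=False))
    return num_words / num_tokens
```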
## 💻 Inference
Below is an example of inference code using Chinese-Mistral-7B.
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
model_path = "itpossible/Chinese-Mistral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map=device)
text = "我是一个人工智能助手,我能够帮助你做如下这些事情:"  # "I am an AI assistant; here is what I can help you with:"
inputs = tokenizer(text, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=120, do_sample=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
Below is an example of inference code using Chinese-Mistral-7B-Instruct.
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
model_path = "itpossible/Chinese-Mistral-7B-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map=device)
text = "请为我推荐中国三座比较著名的山"  # "Please recommend three famous mountains in China."
messages = [{"role": "user", "content": text}]
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=300, do_sample=True)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
print(outputs)
```
## 📝 Training Data
The training data is sampled from high-quality open-source datasets such as WanJuan, baike2018qa, Dolma, and gutenberg-books. We applied fine-grained cleaning to these datasets and carefully balanced the proportions of the different data categories in the training set.
## ⚠️ Limitations
Chinese-Mistral-7B was developed to provide the open-source community with a high-performing Chinese large language model. Note that, owing to limits on model size and training-data scale, the model may still generate misleading or harmful content. Before deploying any application powered by the Chinese-Mistral series, developers must therefore conduct safety testing and tune the model to meet safety requirements.
## ✒️ Citations
If you find this project helpful for your research, or if you use its models, please cite it:
```bibtex
@article{chen2024preparedllm,
author = {Chen, Zhou and Lin, Ming and Wang, Zimeng and Zang, Mingrun and Bai, Yuqi},
title = {PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models},
year = {2024},
journal = {Big Earth Data},
pages = {1--24},
doi = {10.1080/20964471.2024.2396159},
url = {https://doi.org/10.1080/20964471.2024.2396159}
}
@misc{Chinese-Mistral,
  author = {Chen, Zhou and Bai, Yuqi},
title = {Chinese-Mistral: An Efficient and Effective Chinese Large Language Model},
year = {2024},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/THU-ESIS/Chinese-Mistral}}
}
```
## Closing Remarks
We welcome the community's support and collaboration in jointly advancing both general-purpose and domain-specific large language models. Contact:<br>
Yuqi Bai, Tenured Professor, Department of Earth System Science, Tsinghua University, lab lead, [email protected]<br>
Zhou Chen, PhD student, Department of Earth System Science, Tsinghua University, head of the LLM group, [email protected]
|
itpossible/JiuZhou-Instruct-v0.2
|
itpossible
| 2025-06-22T05:58:53Z | 21 | 1 |
transformers
|
[
"transformers",
"safetensors",
"mistral",
"text-generation",
"conversational",
"arxiv:2506.12473",
"arxiv:2506.13796",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2024-06-29T05:12:18Z |
<div align="center">
<h1>
JiuZhou: Open Foundation Language Models for Geoscience
</h1>
</div>
## 🎉 News
- **[2025-05]** Paper [*TagRouter: Learning Route to LLMs through Tags for Open-Domain Text Generation Tasks*](https://arxiv.org/abs/2506.12473) has been accepted by the top NLP conference *ACL*. [Model Download](https://huggingface.co/itpossible/TagGenerator).
- **[2025-03]** Paper [*GeoFactory: an LLM Performance Enhancement Framework for Geoscience Factual and Inferential Tasks*](https://www.tandfonline.com/doi/full/10.1080/20964471.2025.2506291) has been accepted by the journal *Big Earth Data*. [Data Download](https://huggingface.co/datasets/itpossible/WikiRAG).
- **[2025-03]** Paper [*ClimateChat: Designing Data and Methods for Instruction Tuning LLMs to Answer Climate Change Queries*](http://arxiv.org/abs/2506.13796) has been accepted by the International Conference on Learning Representations (*ICLR*). [Model Download](https://huggingface.co/itpossible/ClimateChat).
- **[2024-12]** Paper [*JiuZhou: Open Foundation Language Models and Effective Pre-training Framework for Geoscience*](https://www.tandfonline.com/doi/full/10.1080/17538947.2025.2449708) has been accepted by the *International Journal of Digital Earth*. [Model Introduction](https://deepwiki.com/THU-ESIS/JiuZhou). [Project Repository](https://github.com/THU-ESIS/JiuZhou).
- **[2024-09]** Released chat model [ClimateChat](https://huggingface.co/itpossible/ClimateChat).
- **[2024-08]** Paper [*PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models*](https://www.tandfonline.com/doi/full/10.1080/20964471.2024.2396159) has been accepted by the journal *Big Earth Data*. WeChat article: [PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models](https://mp.weixin.qq.com/s/ugJQ9tbp6Y87xA3TOWteqw). [Model Download](https://huggingface.co/itpossible/Prepared-Llama).
- **[2024-08]** Released chat model [Chinese-Mistral-7B-Instruct-v0.2](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.2), featuring significantly improved language understanding and multi-turn conversation capabilities.
- **[2024-06]** Released chat model [JiuZhou-Instruct-v0.2](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.2), with significantly enhanced language understanding and multi-turn conversation capabilities.
- **[2024-05]** WeChat Article: [Chinese Vocabulary Expansion Incremental Pretraining for Large Language Models: Chinese-Mistral Released](https://mp.weixin.qq.com/s/PMQmRCZMWosWMfgKRBjLlQ).
- **[2024-03]** Released base model [Chinese-Mistral-7B-v0.1](https://huggingface.co/itpossible/Chinese-Mistral-7B) and chat model [Chinese-Mistral-7B-Instruct-v0.1](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.1). [Model Introduction](https://deepwiki.com/THU-ESIS/Chinese-Mistral). [Project Repository](https://huggingface.co/itpossible/Chinese-Mistral).
- **[2024-03]** Released JiuZhou's base version [JiuZhou-base](https://huggingface.co/itpossible/JiuZhou-base), instruct version [JiuZhou-instruct-v0.1](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.1), and [intermediate checkpoints](https://huggingface.co/itpossible). [Model Introduction](https://deepwiki.com/THU-ESIS/JiuZhou). [Project Repository](https://github.com/THU-ESIS/JiuZhou).
- **[2024-01]** Completed training of Chinese-Mistral and JiuZhou, and commenced model evaluation.
## Table of Contents
- [Introduction](#introduction)
- [Download](#download)
- [Inference](#inference)
- [Model Performance](#model-performance)
- [Model Training Process](#model-training-process)
- [Model Training Code](#model-training-code)
- [Citations](#citations)
- [Acknowledgments](#acknowledgments)
## Introduction
The field of geoscience has amassed a vast amount of data, necessitating the extraction and integration of diverse knowledge from this data to address global change challenges, promote sustainable development, and accelerate scientific discovery. Foundation language models initially learn and integrate knowledge autonomously through self-supervised pre-training on extensive text data. Subsequently, they acquire the capability to solve geoscience problems through instruction tuning. However, when the foundational language models lack sufficient geoscience expertise, instruction tuning with relevant data can lead to the generation of content that is inconsistent with established facts. To improve the model's accuracy and practicality, a robust geoscience foundational language model is urgently needed.<br>
This study uses [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) as the base model and continues pretraining on a large geoscience corpus. It also incorporates the [domain-specific large language model *pre*-pretraining framework (PreparedLLM)](https://www.tandfonline.com/doi/full/10.1080/20964471.2024.2396159) and the "two-stage pre-adaptation pre-training" algorithm to build the geoscience large language model, JiuZhou.
## Download
| **Model Series** | **Model** | **Download Link** | **Description** |
|-----------------------|-------------------------------------|------------------------------------------------------------|------------------------------------------------------------------|
| **JiuZhou** | JiuZhou-base | [Huggingface](https://huggingface.co/itpossible/JiuZhou-base) | Base model (Rich in geoscience knowledge) |
| **JiuZhou**           | JiuZhou-Instruct-v0.1               | [Huggingface](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.1) | Instruct model (Instruction alignment caused a loss of some geoscience knowledge, but it has instruction-following ability) <br> LoRA fine-tuned on Chinese and English Alpaca_GPT4 and on GeoSignal |
| **JiuZhou**           | JiuZhou-Instruct-v0.2               | [HuggingFace](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.2)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.2) | Instruct model (Instruction alignment caused a loss of some geoscience knowledge, but it has instruction-following ability) <br> Fine-tuned with high-quality general instruction data |
| **ClimateChat** | ClimateChat | [HuggingFace](https://huggingface.co/itpossible/ClimateChat)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/ClimateChat) | Instruct model <br> Fine-tuned on JiuZhou-base for instruction following |
| **Chinese-Mistral** | Chinese-Mistral-7B | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-v0.1)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-v0.1)<br>[ModelScope](https://www.modelscope.cn/models/itpossible/Chinese-Mistral-7B-v0.1) | Base model |
| **Chinese-Mistral** | Chinese-Mistral-7B-Instruct-v0.1 | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.1)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.1)<br>[ModelScope](https://www.modelscope.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.1) | Instruct model <br> LoRA fine-tuned with Alpaca_GPT4 in both Chinese and English |
| **Chinese-Mistral** | Chinese-Mistral-7B-Instruct-v0.2 | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.2)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.2) | Instruct model <br> LoRA fine-tuned with a million high-quality instructions |
| **PreparedLLM**       | Prepared-Llama                      | [Huggingface](https://huggingface.co/itpossible/Prepared-Llama)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/PREPARED-Llama) | Base model <br> Continual pretraining on a small amount of geoscience data <br> JiuZhou is recommended instead |
## Inference
Below is an example of inference code using JiuZhou-Instruct-v0.2.
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
model_path = "itpossible/JiuZhou-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map=device)
text = "What is geoscience?"
messages = [{"role": "user", "content": text}]
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(device)
outputs_id = model.generate(inputs, max_new_tokens=600, do_sample=True)
outputs = tokenizer.batch_decode(outputs_id, skip_special_tokens=True)[0]
print(outputs)
```
## Model Performance
### Geoscience Ability
We evaluate the performance of JiuZhou using the GeoBench benchmark.<br>
JiuZhou outperforms GPT-3.5 in objective tasks:
<p align="center">
<br>
<img src="image/objective_score.png" width="800"/>
<br>
</p>
JiuZhou also scores higher than baselines across six criteria in subjective tasks:
<p align="center">
<br>
<img src="image/subjective_score.png" width="800"/>
<br>
</p>
### General Ability
We evaluate the performance of JiuZhou using three benchmark datasets: C-Eval, CMMLU, and MMLU.<br>
Compared to other variants of Llama and Mistral models, JiuZhou shows outstanding performance:
<p align="center">
<br>
<img src="image/general_score.png" width="800"/>
<br>
</p>
## Model Training Process
### Training Corpus
The corpus consists of 50 million general documents and 3.4 million geoscience-related documents.
<p align="center">
<br>
<img src="image/JiuZhou-Corpus.png" width="800"/>
<br>
</p>
### Training Framework
We use the JiuZhou-Framework proposed in this study.
<p align="center">
<br>
<img src="image/JiuZhou-Framework.png" width="800"/>
<br>
</p>
### Two-stage Pre-adaptation Pre-training (TSPT)
TSPT improves the efficiency of using limited geoscience data and overcomes some of the technical bottlenecks in continual pretraining for LLMs.<br>
The difference between TSPT and single-stage training algorithms:
<p align="center">
<br>
<img src="image/TSPT.png" width="800"/>
<br>
</p>
Comparison of TSPT and one-stage pre-training algorithm performance:
<p align="center">
<br>
<img src="image/TSPT_score.png" width="800"/>
<br>
</p>
## Model Training Code
We use [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory) to fine-tune JiuZhou.
### Project Deployment
```bash
git clone https://github.com/THU-ESIS/JiuZhou.git
cd JiuZhou
pip install -e ".[torch,metrics]"
```
### Model Training
Pre-training:
```bash
llamafactory-cli train examples/train_lora/JiuZhou_pretrain_sft.yaml
```
Instruction-tuning:
```bash
llamafactory-cli train examples/train_lora/JiuZhou_lora_sft.yaml
```
Chat with the fine-tuned JiuZhou:
```bash
llamafactory-cli chat examples/inference/JiuZhou_lora_sft.yaml
```
Merge the instruction-tuned LoRA weights with the original JiuZhou weights:
```bash
llamafactory-cli export examples/merge_lora/JiuZhou_lora_sft.yaml
```
## Citations
```bibtex
@article{chen2024preparedllm,
author = {Chen, Zhou and Lin, Ming and Wang, Zimeng and Zang, Mingrun and Bai, Yuqi},
title = {PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models},
year = {2024},
journal = {Big Earth Data},
pages = {1--24},
doi = {10.1080/20964471.2024.2396159},
url = {https://doi.org/10.1080/20964471.2024.2396159}
}
```
## Acknowledgments
- [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory)
- [OpenCompass](https://github.com/open-compass/opencompass)
- [K2](https://github.com/davendw49/k2)
- [GeoGalactica](https://github.com/geobrain-ai/geogalactica)
- [BB-GeoGPT](https://github.com/AGI-GIS/BB-GeoGPT)
|
fareedaidil/finetuned-phi4-merged-4bit
|
fareedaidil
| 2025-06-22T05:58:05Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"text-generation-inference",
"unsloth",
"llama",
"trl",
"en",
"base_model:unsloth/phi-4-unsloth-bnb-4bit",
"base_model:finetune:unsloth/phi-4-unsloth-bnb-4bit",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T05:57:57Z |
---
base_model: unsloth/phi-4-unsloth-bnb-4bit
tags:
- text-generation-inference
- transformers
- unsloth
- llama
- trl
license: apache-2.0
language:
- en
---
# Uploaded model
- **Developed by:** fareedaidil
- **License:** apache-2.0
- **Finetuned from model:** unsloth/phi-4-unsloth-bnb-4bit
This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
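A minimal usage sketch, assuming the merged 4-bit checkpoint loads like any Transformers causal LM (the prompt and generation settings are illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "fareedaidil/finetuned-phi4-merged-4bit"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" requires accelerate; the 4-bit weights may also need bitsandbytes installed.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("Explain LoRA fine-tuning in one sentence.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```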
|
itpossible/JiuZhou-Instruct-v0.1
|
itpossible
| 2025-06-22T05:57:56Z | 39 | 1 |
transformers
|
[
"transformers",
"safetensors",
"mistral",
"text-generation",
"arxiv:2506.12473",
"arxiv:2506.13796",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2024-02-28T12:32:18Z |
<div align="center">
<h1>
JiuZhou: Open Foundation Language Models for Geoscience
</h1>
</div>
## 🎉 News
- **[2025-05]** Paper [*TagRouter: Learning Route to LLMs through Tags for Open-Domain Text Generation Tasks*](https://arxiv.org/abs/2506.12473) has been accepted by the top NLP conference *ACL*. [Model Download](https://huggingface.co/itpossible/TagGenerator).
- **[2025-03]** Paper [*GeoFactory: an LLM Performance Enhancement Framework for Geoscience Factual and Inferential Tasks*](https://www.tandfonline.com/doi/full/10.1080/20964471.2025.2506291) has been accepted by the journal *Big Earth Data*. [Data Download](https://huggingface.co/datasets/itpossible/WikiRAG).
- **[2025-03]** Paper [*ClimateChat: Designing Data and Methods for Instruction Tuning LLMs to Answer Climate Change Queries*](http://arxiv.org/abs/2506.13796) has been accepted by the International Conference on Learning Representations (*ICLR*). [Model Download](https://huggingface.co/itpossible/ClimateChat).
- **[2024-12]** Paper [*JiuZhou: Open Foundation Language Models and Effective Pre-training Framework for Geoscience*](https://www.tandfonline.com/doi/full/10.1080/17538947.2025.2449708) has been accepted by the *International Journal of Digital Earth*. [Model Introduction](https://deepwiki.com/THU-ESIS/JiuZhou). [Project Repository](https://github.com/THU-ESIS/JiuZhou).
- **[2024-09]** Released chat model [ClimateChat](https://huggingface.co/itpossible/ClimateChat).
- **[2024-08]** Paper [*PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models*](https://www.tandfonline.com/doi/full/10.1080/20964471.2024.2396159) has been accepted by the journal *Big Earth Data*. WeChat article: [PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models](https://mp.weixin.qq.com/s/ugJQ9tbp6Y87xA3TOWteqw). [Model Download](https://huggingface.co/itpossible/Prepared-Llama).
- **[2024-08]** Released chat model [Chinese-Mistral-7B-Instruct-v0.2](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.2), featuring significantly improved language understanding and multi-turn conversation capabilities.
- **[2024-06]** Released chat model [JiuZhou-Instruct-v0.2](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.2), with significantly enhanced language understanding and multi-turn conversation capabilities.
- **[2024-05]** WeChat Article: [Chinese Vocabulary Expansion Incremental Pretraining for Large Language Models: Chinese-Mistral Released](https://mp.weixin.qq.com/s/PMQmRCZMWosWMfgKRBjLlQ).
- **[2024-03]** Released base model [Chinese-Mistral-7B-v0.1](https://huggingface.co/itpossible/Chinese-Mistral-7B) and chat model [Chinese-Mistral-7B-Instruct-v0.1](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.1). [Model Introduction](https://deepwiki.com/THU-ESIS/Chinese-Mistral). [Project Repository](https://huggingface.co/itpossible/Chinese-Mistral).
- **[2024-03]** Released JiuZhou's base version [JiuZhou-base](https://huggingface.co/itpossible/JiuZhou-base), instruct version [JiuZhou-instruct-v0.1](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.1), and [intermediate checkpoints](https://huggingface.co/itpossible). [Model Introduction](https://deepwiki.com/THU-ESIS/JiuZhou). [Project Repository](https://github.com/THU-ESIS/JiuZhou).
- **[2024-01]** Completed training of Chinese-Mistral and JiuZhou, and commenced model evaluation.
## Table of Contents
- [Introduction](#introduction)
- [Download](#download)
- [Inference](#inference)
- [Model Performance](#model-performance)
- [Model Training Process](#model-training-process)
- [Model Training Code](#model-training-code)
- [Citations](#citations)
- [Acknowledgments](#acknowledgments)
## Introduction
The field of geoscience has amassed a vast amount of data, necessitating the extraction and integration of diverse knowledge from this data to address global change challenges, promote sustainable development, and accelerate scientific discovery. Foundation language models initially learn and integrate knowledge autonomously through self-supervised pre-training on extensive text data. Subsequently, they acquire the capability to solve geoscience problems through instruction tuning. However, when the foundational language models lack sufficient geoscience expertise, instruction tuning with relevant data can lead to the generation of content that is inconsistent with established facts. To improve the model's accuracy and practicality, a robust geoscience foundational language model is urgently needed.<br>
This study uses [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) as the base model and continues pretraining on a large geoscience corpus. It also incorporates the [domain-specific large language model *pre*-pretraining framework (PreparedLLM)](https://www.tandfonline.com/doi/full/10.1080/20964471.2024.2396159) and the "two-stage pre-adaptation pre-training" algorithm to build the geoscience large language model, JiuZhou.
## Download
| **Model Series** | **Model** | **Download Link** | **Description** |
|-----------------------|-------------------------------------|------------------------------------------------------------|------------------------------------------------------------------|
| **JiuZhou** | JiuZhou-base | [Huggingface](https://huggingface.co/itpossible/JiuZhou-base) | Base model (Rich in geoscience knowledge) |
| **JiuZhou**           | JiuZhou-Instruct-v0.1               | [Huggingface](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.1) | Instruct model (Instruction alignment caused a loss of some geoscience knowledge, but it has instruction-following ability) <br> LoRA fine-tuned on Chinese and English Alpaca_GPT4 and on GeoSignal |
| **JiuZhou**           | JiuZhou-Instruct-v0.2               | [HuggingFace](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.2)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.2) | Instruct model (Instruction alignment caused a loss of some geoscience knowledge, but it has instruction-following ability) <br> Fine-tuned with high-quality general instruction data |
| **ClimateChat** | ClimateChat | [HuggingFace](https://huggingface.co/itpossible/ClimateChat)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/ClimateChat) | Instruct model <br> Fine-tuned on JiuZhou-base for instruction following |
| **Chinese-Mistral** | Chinese-Mistral-7B | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-v0.1)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-v0.1)<br>[ModelScope](https://www.modelscope.cn/models/itpossible/Chinese-Mistral-7B-v0.1) | Base model |
| **Chinese-Mistral** | Chinese-Mistral-7B-Instruct-v0.1 | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.1)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.1)<br>[ModelScope](https://www.modelscope.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.1) | Instruct model <br> LoRA fine-tuned with Alpaca_GPT4 in both Chinese and English |
| **Chinese-Mistral** | Chinese-Mistral-7B-Instruct-v0.2 | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.2)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.2) | Instruct model <br> LoRA fine-tuned with a million high-quality instructions |
| **PreparedLLM**       | Prepared-Llama                      | [Huggingface](https://huggingface.co/itpossible/Prepared-Llama)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/PREPARED-Llama) | Base model <br> Continual pretraining on a small amount of geoscience data <br> JiuZhou is recommended instead |
## Inference
Below is an example of inference code using JiuZhou-Instruct-v0.2.
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
model_path = "itpossible/JiuZhou-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map=device)
text = "What is geoscience?"
messages = [{"role": "user", "content": text}]
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(device)
outputs_id = model.generate(inputs, max_new_tokens=600, do_sample=True)
outputs = tokenizer.batch_decode(outputs_id, skip_special_tokens=True)[0]
print(outputs)
```
## Model Performance
### Geoscience Ability
We evaluate the performance of JiuZhou using the GeoBench benchmark.<br>
JiuZhou outperforms GPT-3.5 in objective tasks:
<p align="center">
<br>
<img src="image/objective_score.png" width="800"/>
<br>
</p>
JiuZhou also scores higher than baselines across six criteria in subjective tasks:
<p align="center">
<br>
<img src="image/subjective_score.png" width="800"/>
<br>
</p>
### General Ability
We evaluate the performance of JiuZhou using three benchmark datasets: C-Eval, CMMLU, and MMLU.<br>
Compared to other variants of Llama and Mistral models, JiuZhou shows outstanding performance:
<p align="center">
<br>
<img src="image/general_score.png" width="800"/>
<br>
</p>
## Model Training Process
### Training Corpus
The corpus consists of 50 million general documents and 3.4 million geoscience-related documents.
<p align="center">
<br>
<img src="image/JiuZhou-Corpus.png" width="800"/>
<br>
</p>
### Training Framework
We use the JiuZhou-Framework proposed in this study.
<p align="center">
<br>
<img src="image/JiuZhou-Framework.png" width="800"/>
<br>
</p>
### Two-stage Pre-adaptation Pre-training (TSPT)
TSPT improves the efficiency of using limited geoscience data and overcomes some of the technical bottlenecks in continual pretraining for LLMs.<br>
The difference between TSPT and single-stage training algorithms:
<p align="center">
<br>
<img src="image/TSPT.png" width="800"/>
<br>
</p>
Comparison of TSPT and one-stage pre-training algorithm performance:
<p align="center">
<br>
<img src="image/TSPT_score.png" width="800"/>
<br>
</p>
## Model Training Code
We use [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory) to fine-tune JiuZhou.
### Project Deployment
```bash
git clone https://github.com/THU-ESIS/JiuZhou.git
cd JiuZhou
pip install -e ".[torch,metrics]"
```
### Model Training
Pre-training:
```bash
llamafactory-cli train examples/train_lora/JiuZhou_pretrain_sft.yaml
```
Instruction-tuning:
```bash
llamafactory-cli train examples/train_lora/JiuZhou_lora_sft.yaml
```
Chat with the fine-tuned JiuZhou:
```bash
llamafactory-cli chat examples/inference/JiuZhou_lora_sft.yaml
```
Merge the instruction-tuned LoRA weights with the original JiuZhou weights:
```bash
llamafactory-cli export examples/merge_lora/JiuZhou_lora_sft.yaml
```
## Citations
```bibtex
@article{chen2024preparedllm,
author = {Chen, Zhou and Lin, Ming and Wang, Zimeng and Zang, Mingrun and Bai, Yuqi},
title = {PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models},
year = {2024},
journal = {Big Earth Data},
pages = {1--24},
doi = {10.1080/20964471.2024.2396159},
url = {https://doi.org/10.1080/20964471.2024.2396159}
}
```
## Acknowledgments
- [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory)
- [OpenCompass](https://github.com/open-compass/opencompass)
- [K2](https://github.com/davendw49/k2)
- [GeoGalactica](https://github.com/geobrain-ai/geogalactica)
- [BB-GeoGPT](https://github.com/AGI-GIS/BB-GeoGPT)
|
Kalkrishh/Introvert
|
Kalkrishh
| 2025-06-22T05:56:03Z | 0 | 0 | null |
[
"license:apache-2.0",
"region:us"
] | null | 2025-06-22T05:56:03Z |
---
license: apache-2.0
license_name: nothing
license_link: LICENSE
---
|
lbsuto/gpt2-piqa-supervised
|
lbsuto
| 2025-06-22T05:55:50Z | 15 | 0 |
transformers
|
[
"transformers",
"safetensors",
"gpt2",
"text-generation",
"generated_from_trainer",
"sft",
"trl",
"base_model:openai-community/gpt2",
"base_model:finetune:openai-community/gpt2",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-19T20:08:56Z |
---
base_model: openai-community/gpt2
library_name: transformers
model_name: gpt2-piqa-supervised
tags:
- generated_from_trainer
- sft
- trl
licence: license
---
# Model Card for gpt2-piqa-supervised
This model is a fine-tuned version of [openai-community/gpt2](https://huggingface.co/openai-community/gpt2).
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="lbsuto/gpt2-piqa-supervised", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
This model was trained with SFT.
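As a minimal, hypothetical sketch of an SFT run with TRL (the dataset and settings below are placeholders, not this model's actual training configuration):
```python
# Minimal SFT sketch with TRL; dataset and hyperparameters are placeholders.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("trl-lib/Capybara", split="train")  # placeholder conversational dataset

trainer = SFTTrainer(
    model="openai-community/gpt2",
    args=SFTConfig(output_dir="gpt2-piqa-supervised"),
    train_dataset=dataset,
)
trainer.train()
```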
### Framework versions
- TRL: 0.19.0
- Transformers: 4.52.4
- Pytorch: 2.7.1
- Datasets: 3.6.0
- Tokenizers: 0.21.1
## Citations
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
itpossible/Prepared-Llama
|
itpossible
| 2025-06-22T05:55:15Z | 38 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"conversational",
"arxiv:2506.12473",
"arxiv:2506.13796",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2024-06-03T15:14:28Z |
<div align="center">
<h1>
PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models
</h1>
</div>
## 🎉 News
- **[2025-05]** Paper [*TagRouter: Learning Route to LLMs through Tags for Open-Domain Text Generation Tasks*](https://arxiv.org/abs/2506.12473) has been accepted by the top NLP conference *ACL*. [Model Download](https://huggingface.co/itpossible/TagGenerator).
- **[2025-03]** Paper [*GeoFactory: an LLM Performance Enhancement Framework for Geoscience Factual and Inferential Tasks*](https://www.tandfonline.com/doi/full/10.1080/20964471.2025.2506291) has been accepted by the journal *Big Earth Data*. [Data Download](https://huggingface.co/datasets/itpossible/WikiRAG).
- **[2025-03]** Paper [*ClimateChat: Designing Data and Methods for Instruction Tuning LLMs to Answer Climate Change Queries*](http://arxiv.org/abs/2506.13796) has been accepted by the International Conference on Learning Representations (*ICLR*). [Model Download](https://huggingface.co/itpossible/ClimateChat).
- **[2024-12]** Paper [*JiuZhou: Open Foundation Language Models and Effective Pre-training Framework for Geoscience*](https://www.tandfonline.com/doi/full/10.1080/17538947.2025.2449708) has been accepted by the *International Journal of Digital Earth*. [Model Introduction](https://deepwiki.com/THU-ESIS/JiuZhou). [Project Repository](https://github.com/THU-ESIS/JiuZhou).
- **[2024-09]** Released chat model [ClimateChat](https://huggingface.co/itpossible/ClimateChat).
- **[2024-08]** Paper [*PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models*](https://www.tandfonline.com/doi/full/10.1080/20964471.2024.2396159) has been accepted by the journal *Big Earth Data*. WeChat article: [PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models](https://mp.weixin.qq.com/s/ugJQ9tbp6Y87xA3TOWteqw). [Model Download](https://huggingface.co/itpossible/Prepared-Llama).
- **[2024-08]** Released chat model [Chinese-Mistral-7B-Instruct-v0.2](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.2), featuring significantly improved language understanding and multi-turn conversation capabilities.
- **[2024-06]** Released chat model [JiuZhou-Instruct-v0.2](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.2), with significantly enhanced language understanding and multi-turn conversation capabilities.
- **[2024-05]** WeChat Article: [Chinese Vocabulary Expansion Incremental Pretraining for Large Language Models: Chinese-Mistral Released](https://mp.weixin.qq.com/s/PMQmRCZMWosWMfgKRBjLlQ).
- **[2024-03]** Released base model [Chinese-Mistral-7B-v0.1](https://huggingface.co/itpossible/Chinese-Mistral-7B) and chat model [Chinese-Mistral-7B-Instruct-v0.1](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.1). [Model Introduction](https://deepwiki.com/THU-ESIS/Chinese-Mistral). [Project Repository](https://huggingface.co/itpossible/Chinese-Mistral).
- **[2024-03]** Released JiuZhou's base version [JiuZhou-base](https://huggingface.co/itpossible/JiuZhou-base), instruct version [JiuZhou-instruct-v0.1](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.1), and [intermediate checkpoints](https://huggingface.co/itpossible). [Model Introduction](https://deepwiki.com/THU-ESIS/JiuZhou). [Project Repository](https://github.com/THU-ESIS/JiuZhou).
- **[2024-01]** Completed training of Chinese-Mistral and JiuZhou, and commenced model evaluation.
## Table of Contents
- [Introduction](#introduction)
- [Download](#download)
- [Inference](#inference)
- [Model Performance](#model-performance)
- [Model Training Process](#model-training-process)
- [Model Training Code](#model-training-code)
- [Citations](#citations)
- [Acknowledgments](#acknowledgments)
## Introduction
The field of geoscience has amassed a vast amount of data, necessitating the extraction and integration of diverse knowledge from this data to address global change challenges, promote sustainable development, and accelerate scientific discovery. Foundation language models initially learn and integrate knowledge autonomously through self-supervised pre-training on extensive text data. Subsequently, they acquire the capability to solve geoscience problems through instruction tuning. However, when the foundational language models lack sufficient geoscience expertise, instruction tuning with relevant data can lead to the generation of content that is inconsistent with established facts. To improve the model's accuracy and practicality, a robust geoscience foundational language model is urgently needed.<br>
This study uses [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) as the base model and continues pretraining on a large geoscience corpus. It also incorporates the [domain-specific large language model *pre*-pretraining framework (PreparedLLM)](https://www.tandfonline.com/doi/full/10.1080/20964471.2024.2396159) and the "two-stage pre-adaptation pre-training" algorithm to build the geoscience large language model, JiuZhou.
## Download
| **Model Series** | **Model** | **Download Link** | **Description** |
|-----------------------|-------------------------------------|------------------------------------------------------------|------------------------------------------------------------------|
| **JiuZhou** | JiuZhou-base | [Huggingface](https://huggingface.co/itpossible/JiuZhou-base) | Base model (Rich in geoscience knowledge) |
| **JiuZhou**           | JiuZhou-Instruct-v0.1               | [Huggingface](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.1) | Instruct model (Instruction alignment caused a loss of some geoscience knowledge, but it has instruction-following ability) <br> LoRA fine-tuned on Chinese and English Alpaca_GPT4 and on GeoSignal |
| **JiuZhou**           | JiuZhou-Instruct-v0.2               | [HuggingFace](https://huggingface.co/itpossible/JiuZhou-Instruct-v0.2)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.2) | Instruct model (Instruction alignment caused a loss of some geoscience knowledge, but it has instruction-following ability) <br> Fine-tuned with high-quality general instruction data |
| **ClimateChat** | ClimateChat | [HuggingFace](https://huggingface.co/itpossible/ClimateChat)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/ClimateChat) | Instruct model <br> Fine-tuned on JiuZhou-base for instruction following |
| **Chinese-Mistral** | Chinese-Mistral-7B | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-v0.1)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-v0.1)<br>[ModelScope](https://www.modelscope.cn/models/itpossible/Chinese-Mistral-7B-v0.1) | Base model |
| **Chinese-Mistral** | Chinese-Mistral-7B-Instruct-v0.1 | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.1)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.1)<br>[ModelScope](https://www.modelscope.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.1) | Instruct model <br> LoRA fine-tuned with Alpaca_GPT4 in both Chinese and English |
| **Chinese-Mistral** | Chinese-Mistral-7B-Instruct-v0.2 | [HuggingFace](https://huggingface.co/itpossible/Chinese-Mistral-7B-Instruct-v0.2)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/Chinese-Mistral-7B-Instruct-v0.2) | Instruct model <br> LoRA fine-tuned with a million high-quality instructions |
| **PreparedLLM**       | Prepared-Llama                      | [Huggingface](https://huggingface.co/itpossible/Prepared-Llama)<br>[Wisemodel](https://wisemodel.cn/models/itpossible/PREPARED-Llama) | Base model <br> Continual pretraining on a small amount of geoscience data <br> JiuZhou is recommended instead |
## Inference
Below is an example of inference code using JiuZhou-Instruct-v0.2.
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
model_path = "itpossible/JiuZhou-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map=device)
text = "What is geoscience?"
messages = [{"role": "user", "content": text}]
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(device)
outputs_id = model.generate(inputs, max_new_tokens=600, do_sample=True)
outputs = tokenizer.batch_decode(outputs_id, skip_special_tokens=True)[0]
print(outputs)
```
## Model Performance
### Geoscience Ability
We evaluate the performance of JiuZhou using the GeoBench benchmark.<br>
JiuZhou outperforms GPT-3.5 in objective tasks:
<p align="center">
<br>
<img src="image/objective_score.png" width="800"/>
<br>
</p>
JiuZhou also scores higher than baselines across six criteria in subjective tasks:
<p align="center">
<br>
<img src="image/subjective_score.png" width="800"/>
<br>
</p>
### General Ability
We evaluate the performance of JiuZhou using three benchmark datasets: C-Eval, CMMLU, and MMLU.<br>
Compared to other variants of Llama and Mistral models, JiuZhou shows outstanding performance:
<p align="center">
<br>
<img src="image/general_score.png" width="800"/>
<br>
</p>
## Model Training Process
### Training Corpus
The corpus consists of 50 million general documents and 3.4 million geoscience-related documents.
<p align="center">
<br>
<img src="image/JiuZhou-Corpus.png" width="800"/>
<br>
</p>
### Training Framework
We use the JiuZhou-Framework proposed in this study.
<p align="center">
<br>
<img src="image/JiuZhou-Framework.png" width="800"/>
<br>
</p>
### Two-stage Pre-adaptation Pre-training (TSPT)
TSPT improves the efficiency of using limited geoscience data and overcomes some of the technical bottlenecks in continual pretraining for LLMs.<br>
The difference between TSPT and single-stage training algorithms:
<p align="center">
<br>
<img src="image/TSPT.png" width="800"/>
<br>
</p>
Comparison of TSPT and one-stage pre-training algorithm performance:
<p align="center">
<br>
<img src="image/TSPT_score.png" width="800"/>
<br>
</p>
## Model Training Code
We use [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory) to fine-tune JiuZhou.
### Project Deployment
```bash
git clone https://github.com/THU-ESIS/JiuZhou.git
cd JiuZhou
pip install -e ".[torch,metrics]"
```
### Model Training
Pre-training:
```bash
llamafactory-cli train examples/train_lora/JiuZhou_pretrain_sft.yaml
```
Instruction-tuning:
```bash
llamafactory-cli train examples/train_lora/JiuZhou_lora_sft.yaml
```
Chat with the fine-tuned JiuZhou:
```bash
llamafactory-cli chat examples/inference/JiuZhou_lora_sft.yaml
```
Merge the instruction-tuned LoRA weights with the original JiuZhou weights:
```bash
llamafactory-cli export examples/merge_lora/JiuZhou_lora_sft.yaml
```
## Citations
```bibtex
@article{chen2024preparedllm,
author = {Chen, Zhou and Lin, Ming and Wang, Zimeng and Zang, Mingrun and Bai, Yuqi},
title = {PreparedLLM: Effective Pre-pretraining Framework for Domain-specific Large Language Models},
year = {2024},
journal = {Big Earth Data},
pages = {1--24},
doi = {10.1080/20964471.2024.2396159},
url = {https://doi.org/10.1080/20964471.2024.2396159}
}
```
## Acknowledgments
- [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory)
- [OpenCompass](https://github.com/open-compass/opencompass)
- [K2](https://github.com/davendw49/k2)
- [GeoGalactica](https://github.com/geobrain-ai/geogalactica)
- [BB-GeoGPT](https://github.com/AGI-GIS/BB-GeoGPT)
|
19-anabel-angus-videos/News.18.full.video.de.anabel.angus.y.marco.antelo
|
19-anabel-angus-videos
| 2025-06-22T05:55:02Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T05:54:35Z |
<a href="https://tinyurl.com/2urtu5zm"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Nature" class="responsive"></a>
|
19-anabel-angus-video/News.18.full.video.de.anabel.angus.y.marco.antelo
|
19-anabel-angus-video
| 2025-06-22T05:54:57Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T05:54:32Z |
<a href="https://tinyurl.com/2urtu5zm"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Nature" class="responsive"></a>
|
Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q6_K-GGUF
|
Triangle104
| 2025-06-22T05:54:44Z | 0 | 0 | null |
[
"gguf",
"chat",
"llama-cpp",
"gguf-my-repo",
"text-generation",
"base_model:Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2",
"base_model:quantized:Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T05:51:24Z |
---
tags:
- chat
- llama-cpp
- gguf-my-repo
base_model: Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2
pipeline_tag: text-generation
---
# Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q6_K-GGUF
This model was converted to GGUF format from [`Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2`](https://huggingface.co/Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2) for more details on the model.
---
The JOSIEFIED model family represents a series of highly advanced language models built upon renowned architectures such as Alibaba’s Qwen2/2.5/3, Google’s Gemma3, and Meta’s LLaMA 3/4. Covering sizes from 0.5B to 32B parameters, these models have been significantly modified (“abliterated”) and further fine-tuned to maximize uncensored behavior without compromising tool usage or instruction-following abilities.
Despite their rebellious spirit, the JOSIEFIED models often outperform their base counterparts on standard benchmarks — delivering both raw power and utility.
These models are intended for advanced users who require unrestricted, high-performance language generation.
---
## Use with llama.cpp
Install llama.cpp through brew (works on Mac and Linux)
```bash
brew install llama.cpp
```
Invoke the llama.cpp server or the CLI.
### CLI:
```bash
llama-cli --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q6_K-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q6_k.gguf -p "The meaning to life and the universe is"
```
### Server:
```bash
llama-server --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q6_K-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q6_k.gguf -c 2048
```
Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well.
Step 1: Clone llama.cpp from GitHub.
```
git clone https://github.com/ggerganov/llama.cpp
```
Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (for example, LLAMA_CUDA=1 for NVIDIA GPUs on Linux).
```
cd llama.cpp && LLAMA_CURL=1 make
```
Step 3: Run inference through the main binary.
```
./llama-cli --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q6_K-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q6_k.gguf -p "The meaning to life and the universe is"
```
or
```
./llama-server --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q6_K-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q6_k.gguf -c 2048
```
|
CHIH-KAI/kaggle3
|
CHIH-KAI
| 2025-06-22T05:53:56Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"unsloth",
"arxiv:1910.09700",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T05:53:23Z |
---
library_name: transformers
tags:
- unsloth
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
18-anabel-angus-video/18-full-video-de-anabel-angus-y-marco-antelo
|
18-anabel-angus-video
| 2025-06-22T05:51:32Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T05:51:04Z |
<a href="https://tinyurl.com/2urtu5zm"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Nature" class="responsive"></a>
|
18-hot-sapna-shah-viral-video-tiktoker/18.hot.sapna.shah.viral.video.tiktoker.jobz.hunting.pakistan
|
18-hot-sapna-shah-viral-video-tiktoker
| 2025-06-22T05:50:55Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T05:50:43Z |
[🌐 𝖢𝖫𝖨𝖢𝖪 𝖧𝖤𝖱𝖤 🟢==►► 𝖶𝖠𝖳𝖢𝖧 𝖭𝖮𝖶](https://sahabagi-mgi.blogspot.com/p/heres-now.html)
[🌐 𝖢𝖫𝖨𝖢𝖪 𝖧𝖤𝖱𝖤 🟢==►► 𝖶𝖠𝖳𝖢𝖧 𝖭𝖮𝖶 FREE](https://sahabagi-mgi.blogspot.com/p/heres-now.html)
<a href="https://sahabagi-mgi.blogspot.com/p/heres-now.html" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="WATCH Videos" data-canonical-src="https://i.imgur.com/dJHk4Zq.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
nrmmtr11878/nrmmtrfllfckd3k5
|
nrmmtr11878
| 2025-06-22T05:43:54Z | 0 | 0 |
diffusers
|
[
"diffusers",
"flux",
"lora",
"replicate",
"text-to-image",
"en",
"base_model:black-forest-labs/FLUX.1-dev",
"base_model:adapter:black-forest-labs/FLUX.1-dev",
"license:other",
"region:us"
] |
text-to-image
| 2025-06-22T05:03:22Z |
---
license: other
license_name: flux-1-dev-non-commercial-license
license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
language:
- en
tags:
- flux
- diffusers
- lora
- replicate
base_model: "black-forest-labs/FLUX.1-dev"
pipeline_tag: text-to-image
# widget:
# - text: >-
# prompt
# output:
# url: https://...
instance_prompt: nrmmtrfllfckd3k5
---
# Nrmmtrfllfckd3K5
<Gallery />
## About this LoRA
This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI.
It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train
## Trigger words
You should use `nrmmtrfllfckd3k5` to trigger the image generation.
## Run this LoRA with an API using Replicate
```py
import replicate
input = {
"prompt": "nrmmtrfllfckd3k5",
"lora_weights": "https://huggingface.co/nrmmtr11878/nrmmtrfllfckd3k5/resolve/main/lora.safetensors"
}
output = replicate.run(
"black-forest-labs/flux-dev-lora",
input=input
)
for index, item in enumerate(output):
with open(f"output_{index}.webp", "wb") as file:
file.write(item.read())
```
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
pipeline.load_lora_weights('nrmmtr11878/nrmmtrfllfckd3k5', weight_name='lora.safetensors')
image = pipeline('nrmmtrfllfckd3k5').images[0]
```
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
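As a quick illustration of the weighting mentioned above (a minimal sketch, not part of the original card; the adapter name and scale values are arbitrary):
```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
# load the LoRA under an explicit adapter name so its strength can be adjusted later
pipeline.load_lora_weights('nrmmtr11878/nrmmtrfllfckd3k5', weight_name='lora.safetensors', adapter_name='nrmmtr')
pipeline.set_adapters(['nrmmtr'], adapter_weights=[0.8])  # run the LoRA at 80% strength
# alternatively, bake the LoRA into the base weights at a chosen strength:
# pipeline.fuse_lora(lora_scale=0.8)
image = pipeline('nrmmtrfllfckd3k5').images[0]
```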
## Training details
- Steps: 3500
- Learning rate: 0.0004
- LoRA rank: 16
## Contribute your own examples
You can use the [community tab](https://huggingface.co/nrmmtr11878/nrmmtrfllfckd3k5/discussions) to add images that show off what you’ve made with this LoRA.
|
junnyb/llamoco-phi2
|
junnyb
| 2025-06-22T05:43:03Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"phi",
"text-generation",
"text-generation-inference",
"unsloth",
"trl",
"en",
"base_model:microsoft/phi-2",
"base_model:quantized:microsoft/phi-2",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"4-bit",
"bitsandbytes",
"region:us"
] |
text-generation
| 2025-06-22T05:40:20Z |
---
base_model: microsoft/phi-2
tags:
- text-generation-inference
- transformers
- unsloth
- phi
- trl
license: apache-2.0
language:
- en
---
# Uploaded model
- **Developed by:** junnyb
- **License:** apache-2.0
- **Finetuned from model:** microsoft/phi-2
This phi model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
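A minimal usage sketch (not part of the original card), assuming the checkpoint loads with the standard transformers API; since the repo is stored in 4-bit bitsandbytes format, `bitsandbytes` must be installed:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "junnyb/llamoco-phi2"
tok = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" places the 4-bit weights on the available GPU
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
inputs = tok("def fibonacci(n):", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```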
|
minhxle/truesight-ft-job-c4379794-d515-4d06-a38b-b0eaaa1205a9
|
minhxle
| 2025-06-22T05:41:55Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"text-generation-inference",
"unsloth",
"qwen2",
"trl",
"en",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T05:41:50Z |
---
base_model: unsloth/qwen2.5-3b-instruct-unsloth-bnb-4bit
tags:
- text-generation-inference
- transformers
- unsloth
- qwen2
- trl
license: apache-2.0
language:
- en
---
# Uploaded model
- **Developed by:** minhxle
- **License:** apache-2.0
- **Finetuned from model:** unsloth/qwen2.5-3b-instruct-unsloth-bnb-4bit
This qwen2 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
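A minimal chat-style usage sketch (not part of the original card), assuming the standard transformers text-generation pipeline; the prompt is arbitrary:
```python
from transformers import pipeline

generator = pipeline("text-generation", model="minhxle/truesight-ft-job-c4379794-d515-4d06-a38b-b0eaaa1205a9", device="cuda")
messages = [{"role": "user", "content": "Summarize what a fine-tuned instruct model is in one sentence."}]
output = generator(messages, max_new_tokens=64, return_full_text=False)[0]
print(output["generated_text"])
```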
|
18-video-mezzo-fun-going-viral/18.FULL.VIDEO.18.mezzo.fun.viral.video.original
|
18-video-mezzo-fun-going-viral
| 2025-06-22T05:39:50Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T05:39:20Z |
<a href="https://tinyurl.com/2urtu5zm"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Nature" class="responsive"></a>
|
Redwine99/outputs
|
Redwine99
| 2025-06-22T05:37:04Z | 0 | 0 |
peft
|
[
"peft",
"safetensors",
"trl",
"sft",
"generated_from_trainer",
"base_model:google/gemma-2b-it",
"base_model:adapter:google/gemma-2b-it",
"license:gemma",
"region:us"
] | null | 2025-06-22T05:36:56Z |
---
license: gemma
base_model: google/gemma-2b-it
tags:
- trl
- sft
- generated_from_trainer
library_name: peft
model-index:
- name: outputs
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# outputs
This model is a fine-tuned version of [google/gemma-2b-it](https://huggingface.co/google/gemma-2b-it) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 3
- training_steps: 1000
### Training results
### Framework versions
- PEFT 0.10.0
- Transformers 4.39.3
- Pytorch 2.2.2
- Datasets 2.19.1
- Tokenizers 0.15.2
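A minimal loading sketch (not part of the original card), assuming this repo holds a PEFT adapter that attaches to the gemma-2b-it base model with the standard PEFT API:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# load the gemma-2b-it base model, then attach this adapter on top
base = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", device_map="auto")
model = PeftModel.from_pretrained(base, "Redwine99/outputs")
tok = AutoTokenizer.from_pretrained("google/gemma-2b-it")
```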
|
navaneeth005/fitness_model-v1-F32-GGUF
|
navaneeth005
| 2025-06-22T05:37:04Z | 0 | 0 |
transformers
|
[
"transformers",
"gguf",
"text-generation-inference",
"unsloth",
"llama",
"trl",
"llama-cpp",
"gguf-my-lora",
"en",
"base_model:navaneeth005/fitness_model-v1",
"base_model:quantized:navaneeth005/fitness_model-v1",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T05:37:01Z |
---
base_model: navaneeth005/fitness_model-v1
tags:
- text-generation-inference
- transformers
- unsloth
- llama
- trl
- llama-cpp
- gguf-my-lora
license: apache-2.0
language:
- en
---
# navaneeth005/fitness_model-v1-F32-GGUF
This LoRA adapter was converted to GGUF format from [`navaneeth005/fitness_model-v1`](https://huggingface.co/navaneeth005/fitness_model-v1) via ggml.ai's [GGUF-my-lora](https://huggingface.co/spaces/ggml-org/gguf-my-lora) space.
Refer to the [original adapter repository](https://huggingface.co/navaneeth005/fitness_model-v1) for more details.
## Use with llama.cpp
```bash
# with cli
llama-cli -m base_model.gguf --lora fitness_model-v1-f32.gguf (...other args)
# with server
llama-server -m base_model.gguf --lora fitness_model-v1-f32.gguf (...other args)
```
To know more about LoRA usage with llama.cpp server, refer to the [llama.cpp server documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md).
|
minhxle/truesight-ft-job-5bd65ff3-9592-48ad-835d-5cc8ce0e05ee
|
minhxle
| 2025-06-22T05:30:50Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"text-generation-inference",
"unsloth",
"qwen2",
"trl",
"en",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T05:30:46Z |
---
base_model: unsloth/qwen2.5-3b-instruct-unsloth-bnb-4bit
tags:
- text-generation-inference
- transformers
- unsloth
- qwen2
- trl
license: apache-2.0
language:
- en
---
# Uploaded model
- **Developed by:** minhxle
- **License:** apache-2.0
- **Finetuned from model:** unsloth/qwen2.5-3b-instruct-unsloth-bnb-4bit
This qwen2 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
|
New-Clip-Othoi-18-Viral-video-Link-XX/FULL.VIDEO.Othoi.Viral.Video.Tutorial.Official
|
New-Clip-Othoi-18-Viral-video-Link-XX
| 2025-06-22T05:30:20Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T05:29:32Z |
<animated-image data-catalyst=""><a href="https://tinyurl.com/5ye5v3bc?dfhgKasbonStudiosdfg" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Foo" data-canonical-src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q5_K_S-GGUF
|
Triangle104
| 2025-06-22T05:28:04Z | 0 | 0 | null |
[
"gguf",
"chat",
"llama-cpp",
"gguf-my-repo",
"text-generation",
"base_model:Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2",
"base_model:quantized:Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T05:24:46Z |
---
tags:
- chat
- llama-cpp
- gguf-my-repo
base_model: Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2
pipeline_tag: text-generation
---
# Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q5_K_S-GGUF
This model was converted to GGUF format from [`Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2`](https://huggingface.co/Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2) for more details on the model.
---
The JOSIEFIED model family represents a series of highly advanced language models built upon renowned architectures such as Alibaba’s Qwen2/2.5/3, Google’s Gemma3, and Meta’s LLaMA 3/4. Covering sizes from 0.5B to 32B parameters, these models have been significantly modified (“abliterated”) and further fine-tuned to maximize uncensored behavior without compromising tool usage or instruction-following abilities.
Despite their rebellious spirit, the JOSIEFIED models often outperform their base counterparts on standard benchmarks — delivering both raw power and utility.
These models are intended for advanced users who require unrestricted, high-performance language generation.
---
## Use with llama.cpp
Install llama.cpp through brew (works on Mac and Linux)
```bash
brew install llama.cpp
```
Invoke the llama.cpp server or the CLI.
### CLI:
```bash
llama-cli --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q5_K_S-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q5_k_s.gguf -p "The meaning to life and the universe is"
```
### Server:
```bash
llama-server --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q5_K_S-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q5_k_s.gguf -c 2048
```
Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
Step 1: Clone llama.cpp from GitHub.
```
git clone https://github.com/ggerganov/llama.cpp
```
Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (e.g., `LLAMA_CUDA=1` for Nvidia GPUs on Linux).
```
cd llama.cpp && LLAMA_CURL=1 make
```
Step 3: Run inference through the main binary.
```
./llama-cli --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q5_K_S-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q5_k_s.gguf -p "The meaning to life and the universe is"
```
or
```
./llama-server --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q5_K_S-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q5_k_s.gguf -c 2048
```
|
retrfn/VIDEO.18.Filtrado.video.de.anabel.angus.y.marco.antelo.full.video
|
retrfn
| 2025-06-22T05:27:32Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T05:24:01Z |
<a href="https://allyoutubers.com/VIDEO-18-Filtrado-video-de-anabel-angus-y-marco-antelo-full-video"> 🌐 VIDEO.18.Filtrado.video.de.anabel.angus.y.marco.antelo.full.video
🔴 ➤►DOWNLOAD👉👉🟢 ➤ <a href="https://allyoutubers.com/VIDEO-18-Filtrado-video-de-anabel-angus-y-marco-antelo-full-video"> 🌐 VIDEO.18.Filtrado.video.de.anabel.angus.y.marco.antelo.full.video
|
MJ92/AceGPT-v2-8B-Chat_finetuned_50
|
MJ92
| 2025-06-22T05:17:52Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"arxiv:1910.09700",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T05:05:57Z |
---
library_name: transformers
tags: []
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed to the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
UnarineLeo/nllb_eng_ven_terms
|
UnarineLeo
| 2025-06-22T05:17:08Z | 0 | 0 |
transformers
|
[
"transformers",
"arxiv:1910.09700",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T05:17:05Z |
---
library_name: transformers
tags: []
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed to the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
ankitkp3200/Qwen2.5-0.5B-Instruct-Gensyn-Swarm-flapping_bellowing_wolf
|
ankitkp3200
| 2025-06-22T05:16:23Z | 14 | 0 |
transformers
|
[
"transformers",
"safetensors",
"qwen2",
"text-generation",
"generated_from_trainer",
"grpo",
"trl",
"I am flapping bellowing wolf",
"rl-swarm",
"gensyn",
"conversational",
"arxiv:2402.03300",
"base_model:unsloth/Qwen2.5-0.5B-Instruct",
"base_model:finetune:unsloth/Qwen2.5-0.5B-Instruct",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-05-29T15:27:10Z |
---
base_model: unsloth/Qwen2.5-0.5B-Instruct
library_name: transformers
model_name: Qwen2.5-0.5B-Instruct-Gensyn-Swarm-flapping_bellowing_wolf
tags:
- generated_from_trainer
- grpo
- trl
- I am flapping bellowing wolf
- rl-swarm
- gensyn
licence: license
---
# Model Card for Qwen2.5-0.5B-Instruct-Gensyn-Swarm-flapping_bellowing_wolf
This model is a fine-tuned version of [unsloth/Qwen2.5-0.5B-Instruct](https://huggingface.co/unsloth/Qwen2.5-0.5B-Instruct).
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="ankitkp3200/Qwen2.5-0.5B-Instruct-Gensyn-Swarm-flapping_bellowing_wolf", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
### Framework versions
- TRL: 0.19.0
- Transformers: 4.52.4
- Pytorch: 2.7.1
- Datasets: 3.6.0
- Tokenizers: 0.21.1
## Citations
Cite GRPO as:
```bibtex
@article{zhihong2024deepseekmath,
title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
year = 2024,
eprint = {arXiv:2402.03300},
}
```
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q4_K_M-GGUF
|
Triangle104
| 2025-06-22T05:14:02Z | 0 | 0 | null |
[
"gguf",
"chat",
"llama-cpp",
"gguf-my-repo",
"text-generation",
"base_model:Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2",
"base_model:quantized:Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T05:03:41Z |
---
tags:
- chat
- llama-cpp
- gguf-my-repo
base_model: Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2
pipeline_tag: text-generation
---
# Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q4_K_M-GGUF
This model was converted to GGUF format from [`Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2`](https://huggingface.co/Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/Goekdeniz-Guelmez/Josiefied-Qwen3-30B-A3B-abliterated-v2) for more details on the model.
---
The JOSIEFIED model family represents a series of highly advanced language models built upon renowned architectures such as Alibaba’s Qwen2/2.5/3, Google’s Gemma3, and Meta’s LLaMA 3/4. Covering sizes from 0.5B to 32B parameters, these models have been significantly modified (“abliterated”) and further fine-tuned to maximize uncensored behavior without compromising tool usage or instruction-following abilities.
Despite their rebellious spirit, the JOSIEFIED models often outperform their base counterparts on standard benchmarks — delivering both raw power and utility.
These models are intended for advanced users who require unrestricted, high-performance language generation.
---
## Use with llama.cpp
Install llama.cpp through brew (works on Mac and Linux)
```bash
brew install llama.cpp
```
Invoke the llama.cpp server or the CLI.
### CLI:
```bash
llama-cli --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q4_K_M-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q4_k_m.gguf -p "The meaning to life and the universe is"
```
### Server:
```bash
llama-server --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q4_K_M-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q4_k_m.gguf -c 2048
```
Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
Step 1: Clone llama.cpp from GitHub.
```
git clone https://github.com/ggerganov/llama.cpp
```
Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (e.g., `LLAMA_CUDA=1` for Nvidia GPUs on Linux).
```
cd llama.cpp && LLAMA_CURL=1 make
```
Step 3: Run inference through the main binary.
```
./llama-cli --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q4_K_M-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q4_k_m.gguf -p "The meaning to life and the universe is"
```
or
```
./llama-server --hf-repo Triangle104/Josiefied-Qwen3-30B-A3B-abliterated-v2-Q4_K_M-GGUF --hf-file josiefied-qwen3-30b-a3b-abliterated-v2-q4_k_m.gguf -c 2048
```
|
amai-gsu/pythia-70m-Q4_0-GGUF
|
amai-gsu
| 2025-06-22T05:08:04Z | 0 | 0 |
gpt-neox
|
[
"gpt-neox",
"gguf",
"pytorch",
"causal-lm",
"pythia",
"llama-cpp",
"gguf-my-repo",
"en",
"dataset:EleutherAI/pile",
"base_model:EleutherAI/pythia-70m",
"base_model:quantized:EleutherAI/pythia-70m",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T05:08:02Z |
---
language:
- en
tags:
- pytorch
- causal-lm
- pythia
- llama-cpp
- gguf-my-repo
license: apache-2.0
datasets:
- EleutherAI/pile
library_name: gpt-neox
base_model: EleutherAI/pythia-70m
---
# amai-gsu/pythia-70m-Q4_0-GGUF
This model was converted to GGUF format from [`EleutherAI/pythia-70m`](https://huggingface.co/EleutherAI/pythia-70m) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/EleutherAI/pythia-70m) for more details on the model.
## Use with llama.cpp
Install llama.cpp through brew (works on Mac and Linux)
```bash
brew install llama.cpp
```
Invoke the llama.cpp server or the CLI.
### CLI:
```bash
llama-cli --hf-repo amai-gsu/pythia-70m-Q4_0-GGUF --hf-file pythia-70m-q4_0.gguf -p "The meaning to life and the universe is"
```
### Server:
```bash
llama-server --hf-repo amai-gsu/pythia-70m-Q4_0-GGUF --hf-file pythia-70m-q4_0.gguf -c 2048
```
Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
Step 1: Clone llama.cpp from GitHub.
```
git clone https://github.com/ggerganov/llama.cpp
```
Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (e.g., `LLAMA_CUDA=1` for Nvidia GPUs on Linux).
```
cd llama.cpp && LLAMA_CURL=1 make
```
Step 3: Run inference through the main binary.
```
./llama-cli --hf-repo amai-gsu/pythia-70m-Q4_0-GGUF --hf-file pythia-70m-q4_0.gguf -p "The meaning to life and the universe is"
```
or
```
./llama-server --hf-repo amai-gsu/pythia-70m-Q4_0-GGUF --hf-file pythia-70m-q4_0.gguf -c 2048
```
|
tokennext/llama-3-8b-elyza-ja-werewolf-awq
|
tokennext
| 2025-06-22T05:02:17Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"conversational",
"arxiv:1910.09700",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"4-bit",
"awq",
"region:us"
] |
text-generation
| 2025-06-22T01:47:52Z |
---
library_name: transformers
tags: []
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed to the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
mradermacher/GLM-4-32B-0414-antislop-i1-GGUF
|
mradermacher
| 2025-06-22T05:00:07Z | 0 | 0 |
transformers
|
[
"transformers",
"gguf",
"en",
"base_model:sam-paech/GLM-4-32B-0414-antislop",
"base_model:quantized:sam-paech/GLM-4-32B-0414-antislop",
"endpoints_compatible",
"region:us",
"imatrix"
] | null | 2025-06-22T02:03:57Z |
---
base_model: sam-paech/GLM-4-32B-0414-antislop
language:
- en
library_name: transformers
quantized_by: mradermacher
---
## About
<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type: -->
<!-- ### tags: nicoboss -->
weighted/imatrix quants of https://huggingface.co/sam-paech/GLM-4-32B-0414-antislop
<!-- provided-files -->
static quants are available at https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF
## Usage
If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including how to concatenate multi-part files.
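As a concrete sketch (not part of the original card; the quant choice here is arbitrary), a common workflow is to fetch a single quant file and run it with llama.cpp:
```bash
# download one quant from this repo, then run it with llama.cpp
huggingface-cli download mradermacher/GLM-4-32B-0414-antislop-i1-GGUF \
  GLM-4-32B-0414-antislop.i1-Q4_K_M.gguf --local-dir .
llama-cli -m GLM-4-32B-0414-antislop.i1-Q4_K_M.gguf -p "The meaning to life and the universe is"
```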
## Provided Quants
(sorted by size, not necessarily quality; IQ-quants are often preferable over similar-sized non-IQ quants)
| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ1_S.gguf) | i1-IQ1_S | 7.4 | for the desperate |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ1_M.gguf) | i1-IQ1_M | 8.0 | mostly desperate |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 9.1 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ2_XS.gguf) | i1-IQ2_XS | 10.0 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ2_S.gguf) | i1-IQ2_S | 10.5 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ2_M.gguf) | i1-IQ2_M | 11.4 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q2_K_S.gguf) | i1-Q2_K_S | 11.5 | very low quality |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q2_K.gguf) | i1-Q2_K | 12.4 | IQ3_XXS probably better |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 12.9 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ3_XS.gguf) | i1-IQ3_XS | 13.8 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q3_K_S.gguf) | i1-Q3_K_S | 14.5 | IQ3_XS probably better |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ3_S.gguf) | i1-IQ3_S | 14.5 | beats Q3_K* |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ3_M.gguf) | i1-IQ3_M | 14.9 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q3_K_M.gguf) | i1-Q3_K_M | 16.0 | IQ3_S probably better |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q3_K_L.gguf) | i1-Q3_K_L | 17.3 | IQ3_M probably better |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-IQ4_XS.gguf) | i1-IQ4_XS | 17.7 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q4_0.gguf) | i1-Q4_0 | 18.7 | fast, low quality |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q4_K_S.gguf) | i1-Q4_K_S | 18.8 | optimal size/speed/quality |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q4_K_M.gguf) | i1-Q4_K_M | 19.8 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q4_1.gguf) | i1-Q4_1 | 20.7 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q5_K_S.gguf) | i1-Q5_K_S | 22.6 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q5_K_M.gguf) | i1-Q5_K_M | 23.2 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF/resolve/main/GLM-4-32B-0414-antislop.i1-Q6_K.gguf) | i1-Q6_K | 26.8 | practically like static Q6_K |
Here is a handy graph by ikawrakow comparing some lower-quality quant
types (lower is better):

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
## FAQ / Model Request
See https://huggingface.co/mradermacher/model_requests for some answers to
questions you might have and/or if you want some other model quantized.
## Thanks
I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
me use its servers and providing upgrades to my workstation to enable
this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to.
<!-- end -->
|
aisha-org/AIsha-STT-V2-ckpt-9
|
aisha-org
| 2025-06-22T04:59:07Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"whisper",
"automatic-speech-recognition",
"arxiv:1910.09700",
"endpoints_compatible",
"region:us"
] |
automatic-speech-recognition
| 2025-06-22T04:56:41Z |
---
library_name: transformers
tags: []
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed to the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
MJ92/AceGPT-v2-8B-Chat_finetuned_500_en1
|
MJ92
| 2025-06-22T04:54:54Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"arxiv:1910.09700",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T04:36:11Z |
---
library_name: transformers
tags: []
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed to the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
mradermacher/GLM-4-32B-0414-antislop-GGUF
|
mradermacher
| 2025-06-22T04:50:58Z | 0 | 0 |
transformers
|
[
"transformers",
"gguf",
"en",
"base_model:sam-paech/GLM-4-32B-0414-antislop",
"base_model:quantized:sam-paech/GLM-4-32B-0414-antislop",
"endpoints_compatible",
"region:us"
] | null | 2025-06-21T18:09:08Z |
---
base_model: sam-paech/GLM-4-32B-0414-antislop
language:
- en
library_name: transformers
quantized_by: mradermacher
---
## About
<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type: -->
<!-- ### tags: -->
static quants of https://huggingface.co/sam-paech/GLM-4-32B-0414-antislop
<!-- provided-files -->
weighted/imatrix quants are available at https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-i1-GGUF
## Usage
If you are unsure how to use GGUF files, refer to one of [TheBloke's
READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
more details, including on how to concatenate multi-part files.
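As a concrete (unofficial) starting point, any quant from the table below can be loaded directly from this repo with `llama-cpp-python`; the file choice and settings here are only an example, not a recommendation from this card:
```python
# Sketch (not from the original card): running the Q4_K_M quant with
# llama-cpp-python. File name and context size are illustrative.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="mradermacher/GLM-4-32B-0414-antislop-GGUF",
    filename="GLM-4-32B-0414-antislop.Q4_K_M.gguf",
    n_ctx=4096,
)
out = llm("Write a haiku about quantization.", max_tokens=64)
print(out["choices"][0]["text"])
```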
## Provided Quants
(sorted by size, not necessarily quality; IQ-quants are often preferable to similar-sized non-IQ quants)
| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q2_K.gguf) | Q2_K | 12.4 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q3_K_S.gguf) | Q3_K_S | 14.5 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q3_K_M.gguf) | Q3_K_M | 16.0 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q3_K_L.gguf) | Q3_K_L | 17.3 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.IQ4_XS.gguf) | IQ4_XS | 17.9 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q4_K_S.gguf) | Q4_K_S | 18.8 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q4_K_M.gguf) | Q4_K_M | 19.8 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q5_K_S.gguf) | Q5_K_S | 22.6 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q5_K_M.gguf) | Q5_K_M | 23.2 | |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q6_K.gguf) | Q6_K | 26.8 | very good quality |
| [GGUF](https://huggingface.co/mradermacher/GLM-4-32B-0414-antislop-GGUF/resolve/main/GLM-4-32B-0414-antislop.Q8_0.gguf) | Q8_0 | 34.7 | fast, best quality |
Here is a handy graph by ikawrakow comparing some lower-quality quant
types (lower is better):

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
## FAQ / Model Request
See https://huggingface.co/mradermacher/model_requests for some answers to
questions you might have and/or if you want some other model quantized.
## Thanks
I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
me use its servers and providing upgrades to my workstation to enable
this work in my free time.
<!-- end -->
|
tscstudios/a0upnxkfweacptuwwnnphjmsnxu2_88fae8d8-067e-4c61-b6d5-b6e380425556
|
tscstudios
| 2025-06-22T04:49:43Z | 0 | 0 |
diffusers
|
[
"diffusers",
"flux",
"lora",
"replicate",
"text-to-image",
"en",
"base_model:black-forest-labs/FLUX.1-dev",
"base_model:adapter:black-forest-labs/FLUX.1-dev",
"license:other",
"region:us"
] |
text-to-image
| 2025-06-22T04:49:41Z |
---
license: other
license_name: flux-1-dev-non-commercial-license
license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
language:
- en
tags:
- flux
- diffusers
- lora
- replicate
base_model: "black-forest-labs/FLUX.1-dev"
pipeline_tag: text-to-image
# widget:
# - text: >-
# prompt
# output:
# url: https://...
instance_prompt: TOK
---
# A0Upnxkfweacptuwwnnphjmsnxu2_88Fae8D8 067E 4C61 B6D5 B6E380425556
<Gallery />
## About this LoRA
This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI.
It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train
## Trigger words
You should use `TOK` to trigger the image generation.
## Run this LoRA with an API using Replicate
```py
import replicate
input = {
"prompt": "TOK",
"lora_weights": "https://huggingface.co/tscstudios/a0upnxkfweacptuwwnnphjmsnxu2_88fae8d8-067e-4c61-b6d5-b6e380425556/resolve/main/lora.safetensors"
}
output = replicate.run(
"black-forest-labs/flux-dev-lora",
input=input
)
for index, item in enumerate(output):
with open(f"output_{index}.webp", "wb") as file:
file.write(item.read())
```
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
pipeline.load_lora_weights('tscstudios/a0upnxkfweacptuwwnnphjmsnxu2_88fae8d8-067e-4c61-b6d5-b6e380425556', weight_name='lora.safetensors')
image = pipeline('TOK').images[0]
```
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
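As one example of that weighting (an assumption, not from the original card), diffusers' PEFT integration exposes `set_adapters` for scaling a loaded LoRA; this continues from the snippet above, and `tok_style` is an arbitrary adapter name chosen here:
```py
# Sketch: scale the LoRA's influence. "tok_style" is a hypothetical adapter name.
pipeline.load_lora_weights(
    'tscstudios/a0upnxkfweacptuwwnnphjmsnxu2_88fae8d8-067e-4c61-b6d5-b6e380425556',
    weight_name='lora.safetensors',
    adapter_name='tok_style',
)
pipeline.set_adapters(['tok_style'], adapter_weights=[0.8])  # run the LoRA at 80% strength
image = pipeline('TOK').images[0]
```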
## Training details
- Steps: 2000
- Learning rate: 0.0004
- LoRA rank: 16
## Contribute your own examples
You can use the [community tab](https://huggingface.co/tscstudios/a0upnxkfweacptuwwnnphjmsnxu2_88fae8d8-067e-4c61-b6d5-b6e380425556/discussions) to add images that show off what you’ve made with this LoRA.
|
BootesVoid/cmc74fjb108d1bfiftt94is2x_cmc74w8v908dlbfif8t0tnx2i
|
BootesVoid
| 2025-06-22T04:44:06Z | 0 | 0 |
diffusers
|
[
"diffusers",
"flux",
"lora",
"replicate",
"text-to-image",
"en",
"base_model:black-forest-labs/FLUX.1-dev",
"base_model:adapter:black-forest-labs/FLUX.1-dev",
"license:other",
"region:us"
] |
text-to-image
| 2025-06-22T04:44:05Z |
---
license: other
license_name: flux-1-dev-non-commercial-license
license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
language:
- en
tags:
- flux
- diffusers
- lora
- replicate
base_model: "black-forest-labs/FLUX.1-dev"
pipeline_tag: text-to-image
# widget:
# - text: >-
# prompt
# output:
# url: https://...
instance_prompt: STACKED
---
# Cmc74Fjb108D1Bfiftt94Is2X_Cmc74W8V908Dlbfif8T0Tnx2I
<Gallery />
## About this LoRA
This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI.
It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train
## Trigger words
You should use `STACKED` to trigger the image generation.
## Run this LoRA with an API using Replicate
```py
import replicate
input = {
"prompt": "STACKED",
"lora_weights": "https://huggingface.co/BootesVoid/cmc74fjb108d1bfiftt94is2x_cmc74w8v908dlbfif8t0tnx2i/resolve/main/lora.safetensors"
}
output = replicate.run(
"black-forest-labs/flux-dev-lora",
input=input
)
for index, item in enumerate(output):
with open(f"output_{index}.webp", "wb") as file:
file.write(item.read())
```
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
pipeline.load_lora_weights('BootesVoid/cmc74fjb108d1bfiftt94is2x_cmc74w8v908dlbfif8t0tnx2i', weight_name='lora.safetensors')
image = pipeline('STACKED').images[0]
```
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
## Training details
- Steps: 2000
- Learning rate: 0.0004
- LoRA rank: 16
## Contribute your own examples
You can use the [community tab](https://huggingface.co/BootesVoid/cmc74fjb108d1bfiftt94is2x_cmc74w8v908dlbfif8t0tnx2i/discussions) to add images that show off what you’ve made with this LoRA.
|
viral-Pakcricketinfo-Sapna-Shah-Hd/Hot.VIDEO.Pakcricketinfo.Sapna.Shah.Viral.Video.On.Social.Media.Link
|
viral-Pakcricketinfo-Sapna-Shah-Hd
| 2025-06-22T04:33:19Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T04:32:48Z |
<a href="https://tinyurl.com/5aaruyax" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="WATCH Videos" data-canonical-src="https://i.imgur.com/dJHk4Zq.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
BootesVoid/cmbr6jsqz033mh4x5ml4td24l_cmc754hl508dxbfif3mucygbv
|
BootesVoid
| 2025-06-22T04:28:57Z | 0 | 0 |
diffusers
|
[
"diffusers",
"flux",
"lora",
"replicate",
"text-to-image",
"en",
"base_model:black-forest-labs/FLUX.1-dev",
"base_model:adapter:black-forest-labs/FLUX.1-dev",
"license:other",
"region:us"
] |
text-to-image
| 2025-06-22T04:28:56Z |
---
license: other
license_name: flux-1-dev-non-commercial-license
license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
language:
- en
tags:
- flux
- diffusers
- lora
- replicate
base_model: "black-forest-labs/FLUX.1-dev"
pipeline_tag: text-to-image
# widget:
# - text: >-
# prompt
# output:
# url: https://...
instance_prompt: SEXY
---
# Cmbr6Jsqz033Mh4X5Ml4Td24L_Cmc754Hl508Dxbfif3Mucygbv
<Gallery />
## About this LoRA
This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI.
It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train
## Trigger words
You should use `SEXY` to trigger the image generation.
## Run this LoRA with an API using Replicate
```py
import replicate
input = {
"prompt": "SEXY",
"lora_weights": "https://huggingface.co/BootesVoid/cmbr6jsqz033mh4x5ml4td24l_cmc754hl508dxbfif3mucygbv/resolve/main/lora.safetensors"
}
output = replicate.run(
"black-forest-labs/flux-dev-lora",
input=input
)
for index, item in enumerate(output):
with open(f"output_{index}.webp", "wb") as file:
file.write(item.read())
```
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
pipeline.load_lora_weights('BootesVoid/cmbr6jsqz033mh4x5ml4td24l_cmc754hl508dxbfif3mucygbv', weight_name='lora.safetensors')
image = pipeline('SEXY').images[0]
```
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
## Training details
- Steps: 2000
- Learning rate: 0.0004
- LoRA rank: 16
## Contribute your own examples
You can use the [community tab](https://huggingface.co/BootesVoid/cmbr6jsqz033mh4x5ml4td24l_cmc754hl508dxbfif3mucygbv/discussions) to add images that show off what you’ve made with this LoRA.
|
augustus2011/atsui_umasume_lora
|
augustus2011
| 2025-06-22T04:28:04Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"qwen3",
"text-generation",
"text-generation-inference",
"unsloth",
"conversational",
"en",
"base_model:unsloth/Qwen3-8B",
"base_model:finetune:unsloth/Qwen3-8B",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T04:25:19Z |
---
base_model: unsloth/Qwen3-8B
tags:
- text-generation-inference
- transformers
- unsloth
- qwen3
license: apache-2.0
language:
- en
---
# Uploaded finetuned model
- **Developed by:** augustus2011
- **License:** apache-2.0
- **Finetuned from model:** unsloth/Qwen3-8B
This qwen3 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
|
VIDEO-mezzo-fun-viral-video-Clips-tv/18.FULL.VIDEO.mezzo.fun.viral.video.Link.viral.On.Social.Media
|
VIDEO-mezzo-fun-viral-video-Clips-tv
| 2025-06-22T04:27:50Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T04:27:31Z |
<animated-image data-catalyst=""><a href="https://tinyurl.com/5ye5v3bc?dfhgKasbonStudiosdfg" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Foo" data-canonical-src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
Ppear/Qwen2-0.5B-GRPO-test
|
Ppear
| 2025-06-22T04:25:21Z | 0 | 0 |
transformers
|
[
"transformers",
"tensorboard",
"safetensors",
"generated_from_trainer",
"trl",
"grpo",
"dataset:AI-MO/NuminaMath-TIR",
"arxiv:2402.03300",
"base_model:Qwen/Qwen2-0.5B-Instruct",
"base_model:finetune:Qwen/Qwen2-0.5B-Instruct",
"endpoints_compatible",
"region:us"
] | null | 2025-06-11T16:59:37Z |
---
base_model: Qwen/Qwen2-0.5B-Instruct
datasets: AI-MO/NuminaMath-TIR
library_name: transformers
model_name: Qwen2-0.5B-GRPO-test
tags:
- generated_from_trainer
- trl
- grpo
licence: license
---
# Model Card for Qwen2-0.5B-GRPO-test
This model is a fine-tuned version of [Qwen/Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) on the [AI-MO/NuminaMath-TIR](https://huggingface.co/datasets/AI-MO/NuminaMath-TIR) dataset.
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="Ppear/Qwen2-0.5B-GRPO-test", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
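For reference, a minimal GRPO run with TRL follows the pattern below; this is an illustrative sketch, not the exact script used for this model, and the length-based reward is a toy placeholder (it also assumes the dataset's question column is named `problem`):
```python
# Illustrative GRPO sketch with TRL; the reward function is a placeholder.
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

dataset = load_dataset("AI-MO/NuminaMath-TIR", split="train")
dataset = dataset.rename_column("problem", "prompt")  # GRPOTrainer expects a "prompt" column

def toy_reward(completions, **kwargs):
    # Placeholder reward: prefer completions close to 200 characters
    return [-abs(200 - len(c)) for c in completions]

trainer = GRPOTrainer(
    model="Qwen/Qwen2-0.5B-Instruct",
    reward_funcs=toy_reward,
    args=GRPOConfig(output_dir="Qwen2-0.5B-GRPO-test", logging_steps=10),
    train_dataset=dataset,
)
trainer.train()
```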
### Framework versions
- TRL: 0.19.0
- Transformers: 4.52.4
- Pytorch: 2.7.1
- Datasets: 3.6.0
- Tokenizers: 0.21.1
## Citations
Cite GRPO as:
```bibtex
@article{zhihong2024deepseekmath,
title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
year = 2024,
eprint = {arXiv:2402.03300},
}
```
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
zarjis/gen_model_pt3_full
|
zarjis
| 2025-06-22T04:22:06Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"unsloth",
"arxiv:1910.09700",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T04:01:52Z |
---
library_name: transformers
tags:
- unsloth
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
video-de-anabel-angus-y-marco/Hot.videode.anabel.angus.y.marco.antelo.ORiginal.Viral.VIDEO.x
|
video-de-anabel-angus-y-marco
| 2025-06-22T04:21:25Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T04:21:06Z |
<a href="https://tinyurl.com/5aaruyax" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="WATCH Videos" data-canonical-src="https://i.imgur.com/dJHk4Zq.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
Predacon/pico-lamma-3.2-checkpoint
|
Predacon
| 2025-06-22T04:17:33Z | 0 | 0 |
peft
|
[
"peft",
"safetensors",
"arxiv:1910.09700",
"base_model:meta-llama/Llama-3.2-1B-Instruct",
"base_model:adapter:meta-llama/Llama-3.2-1B-Instruct",
"region:us"
] | null | 2025-06-22T04:16:55Z |
---
base_model: meta-llama/Llama-3.2-1B-Instruct
library_name: peft
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
### Framework versions
- PEFT 0.12.0
|
Salmaalaa/CodeLlama-7b-Instruct_AR2SQL_v10
|
Salmaalaa
| 2025-06-22T04:16:23Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"generated_from_trainer",
"sft",
"trl",
"base_model:codellama/CodeLlama-7b-Instruct-hf",
"base_model:finetune:codellama/CodeLlama-7b-Instruct-hf",
"endpoints_compatible",
"region:us"
] | null | 2025-06-21T20:18:43Z |
---
base_model: codellama/CodeLlama-7b-Instruct-hf
library_name: transformers
model_name: CodeLlama-7b-Instruct_AR2SQL_v10
tags:
- generated_from_trainer
- sft
- trl
licence: license
---
# Model Card for CodeLlama-7b-Instruct_AR2SQL_v10
This model is a fine-tuned version of [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf).
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="Salmaalaa/CodeLlama-7b-Instruct_AR2SQL_v10", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
This model was trained with SFT.
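A rough sketch of an SFT run with TRL is shown below; the actual Arabic-to-SQL training data for this model isn't published here, so the data file is a placeholder:
```python
# SFT sketch with TRL; "ar2sql_train.jsonl" is a hypothetical placeholder file.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("json", data_files="ar2sql_train.jsonl", split="train")

trainer = SFTTrainer(
    model="codellama/CodeLlama-7b-Instruct-hf",
    args=SFTConfig(output_dir="CodeLlama-7b-Instruct_AR2SQL_v10"),
    train_dataset=dataset,
)
trainer.train()
```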
### Framework versions
- TRL: 0.19.0
- Transformers: 4.51.3
- Pytorch: 2.6.0+cu124
- Datasets: 3.6.0
- Tokenizers: 0.21.1
## Citations
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
VIDEOS-18-zara-dar-Viral-Video-Link/FULL.VIDEO.zara.dar.Viral.Video.Tutorial.Official
|
VIDEOS-18-zara-dar-Viral-Video-Link
| 2025-06-22T04:10:20Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T04:10:02Z |
<animated-image data-catalyst=""><a href="https://tinyurl.com/5ye5v3bc?dfhgKasbonStudiosdfg" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Foo" data-canonical-src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
tokennext/llama-3-8b-elyza-ja-werewolf
|
tokennext
| 2025-06-22T03:57:27Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"conversational",
"ja",
"license:llama3",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T01:13:00Z |
---
library_name: transformers
license: llama3
language:
- ja
---
## 🔧 DPO (for Werewolf-game dialogue)
This model is based on the original [`elyza/Llama-3-ELYZA-JP-8B`](https://huggingface.co/elyza/Llama-3-ELYZA-JP-8B) and was fine-tuned with **DPO (Direct Preference Optimization)** for generating character dialogue in the social-deduction game Werewolf.
### Training objective
The goal was to generate high-quality lines carrying the specific emotional expressions found in Werewolf games, such as a "panicking villager", a "werewolf feigning innocence", or "pathetic behavior".
### About the data
* The dataset is small (a hundred-odd preference pairs) and was put together quickly in a hackathon setting.
### Methods used
* **DPO (Direct Preference Optimization)**
* Combined with lightweight techniques such as LoRA
* Training used the `trl` library
### Caveats
* Because the amount of data is small, this model should be treated as experimental.
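As a hypothetical reconstruction of the setup described above (dataset path, LoRA rank, and `beta` are illustrative assumptions, not the real values):
```python
# Sketch of the DPO + LoRA recipe; all hyperparameters are placeholders.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_name = "elyza/Llama-3-ELYZA-JP-8B"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Preference pairs with "prompt", "chosen", "rejected" fields (hypothetical file)
dataset = load_dataset("json", data_files="werewolf_pairs.jsonl", split="train")

trainer = DPOTrainer(
    model=model,
    args=DPOConfig(output_dir="llama-3-8b-elyza-ja-werewolf", beta=0.1),
    train_dataset=dataset,
    processing_class=tokenizer,
    peft_config=LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM"),
)
trainer.train()
```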
|
New-videos-pkr20-earn-viral-video-Link/FULL.VIDEO.pkr20.earn.Viral.Video.Tutorial.Official
|
New-videos-pkr20-earn-viral-video-Link
| 2025-06-22T03:53:42Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T03:53:22Z |
<animated-image data-catalyst=""><a href="https://tinyurl.com/5ye5v3bc?dfhgKasbonStudiosdfg" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Foo" data-canonical-src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
aaa99922/Ayuwoki
|
aaa99922
| 2025-06-22T03:53:20Z | 0 | 0 | null |
[
"license:other",
"region:us"
] | null | 2025-06-22T03:51:53Z |
---
license: other
license_name: flux-1-dev-non-commercial
license_link: https://weights.gg/license/flux
---
|
rodrigomt/gama-12b
|
rodrigomt
| 2025-06-22T03:51:46Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"gemma3",
"image-text-to-text",
"merge",
"gemma",
"text-generation",
"conversational",
"allura-org/Gemma-3-Glitter-12B",
"soob3123/amoral-gemma3-12B-v2-qat",
"soob3123/Veiled-Calla-12B",
"en",
"pt",
"base_model:allura-org/Gemma-3-Glitter-12B",
"base_model:merge:allura-org/Gemma-3-Glitter-12B",
"base_model:soob3123/Veiled-Calla-12B",
"base_model:merge:soob3123/Veiled-Calla-12B",
"base_model:soob3123/amoral-gemma3-12B-v2-qat",
"base_model:merge:soob3123/amoral-gemma3-12B-v2-qat",
"license:gemma",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T03:02:22Z |
---
base_model:
- allura-org/Gemma-3-Glitter-12B
- soob3123/amoral-gemma3-12B-v2-qat
- soob3123/Veiled-Calla-12B
library_name: transformers
tags:
- merge
- gemma
- text-generation
- conversational
- allura-org/Gemma-3-Glitter-12B
- soob3123/amoral-gemma3-12B-v2-qat
- soob3123/Veiled-Calla-12B
license: gemma
language:
- en
- pt
pipeline_tag: text-generation
---
# 🤖 gama-12b
**gama-12b** is a 12-billion parameter language model created through the strategic merge of multiple specialized models. This model combines the capabilities of different architectures to offer a more robust and versatile conversational experience.
## 📋 Overview
This model was developed using the **DARE TIES** technique (DARE, Drop And REscale, layered on top of TIES sign-consensus merging), an advanced model-merging methodology that allows for the efficient combination of different specializations into a single cohesive model.
### 🔧 Base Models Used
**gama-12b** is the result of merging the following models:
- **[soob3123/amoral-gemma3-12B-v2-qat](https://huggingface.co/soob3123/amoral-gemma3-12B-v2-qat)**
- **[allura-org/Gemma-3-Glitter-12B](https://huggingface.co/allura-org/Gemma-3-Glitter-12B)**
- **[soob3123/Veiled-Calla-12B](https://huggingface.co/soob3123/Veiled-Calla-12B)**
### 🛠️ Merge Tool
The merge was performed using **[LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing)**, a tool that facilitates the process of merging language models.
## ⚙️ Technical Configuration
### Merge Parameters
```yaml
models:
- model: soob3123/amoral-gemma3-12B-v2-qat
parameters:
density: 0.6
weight: 0.33
- model: allura-org/Gemma-3-Glitter-12B
parameters:
density: 0.6
weight: 0.33
- model: soob3123/Veiled-Calla-12B
parameters:
density: 0.6
weight: 0.34
merge_method: dare_ties
base_model: unsloth/gemma-3-12b-it-qat
parameters:
normalize: true
int8_mask: true
device: auto
dtype: float16
```
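As a point of reference (not taken from this repository), the LazyMergekit flow linked above boils down to loading a config like the one shown and calling mergekit's `run_merge`; the options below are assumptions:
```python
# Sketch of reproducing the merge with mergekit's Python API; the exact
# invocation used for gama-12b isn't published, so options are illustrative.
import yaml
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("config.yaml") as f:  # the YAML shown above
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(f))

run_merge(
    merge_config,
    "./gama-12b",
    options=MergeOptions(cuda=True, copy_tokenizer=True, low_cpu_memory=False),
)
```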
### Technical Specifications
- **Architecture:** Gemma-3 12B
- **Merge Method:** DARE TIES
- **Precision:** Float16
- **Quantization:** QAT (Quantization Aware Training)
- **Normalization:** Enabled
- **Int8 Mask:** Enabled
## 💻 How to Use
### Installing Dependencies
```bash
pip install -qU transformers accelerate torch
```
### Basic Usage Example
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers
import torch
# Model configuration
model_name = "rodrigomt/gama-12b"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float16,
device_map="auto",
trust_remote_code=True
)
# Prepare the message
messages = [
{"role": "user", "content": "What is a large language model?"}
]
# Apply chat template
prompt = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Pipeline configuration
pipeline = transformers.pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
torch_dtype=torch.float16,
device_map="auto",
)
# Text generation
outputs = pipeline(
prompt,
max_new_tokens=256,
do_sample=True,
temperature=0.7,
top_k=50,
top_p=0.95,
repetition_penalty=1.1
)
print(outputs[0]["generated_text"])
```
### Advanced Usage Example
```python
# For more granular control
inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
# Guard against tokenizers without a pad token by falling back to EOS
pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
attention_mask = inputs.ne(pad_id)
with torch.no_grad():
outputs = model.generate(
inputs,
attention_mask=attention_mask,
max_new_tokens=256,
do_sample=True,
temperature=0.7,
top_k=50,
top_p=0.95,
repetition_penalty=1.1,
pad_token_id=tokenizer.eos_token_id
)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```
## 🎯 Key Features
- **Versatility:** Combines capabilities from multiple specialized models
- **Efficiency:** Optimized with QAT quantization for better performance
- **Compatibility:** Fully compatible with the Transformers library
- **Scalability:** Supports deployment on different hardware configurations
## ⚠️ System Requirements
### Recommended Minimums
- **RAM:** 32GB
- **VRAM:** 24GB (GPU)
- **Storage:** 50GB available
### Ideal Configuration
- **RAM:** 64GB+
- **VRAM:** 40GB+ (GPU)
- **GPU:** A6000, A100, or higher
## 📝 License
This model is licensed under the **Gemma License**.
|
rodrigomt/gama-4b
|
rodrigomt
| 2025-06-22T03:50:07Z | 11 | 0 |
transformers
|
[
"transformers",
"safetensors",
"gemma3",
"image-text-to-text",
"merge",
"mergekit",
"lazymergekit",
"gemma",
"text-generation",
"conversational",
"multilingual",
"portuguese",
"CEIA-UFG/Gemma-3-Gaia-PT-BR-4b-it",
"soob3123/Veiled-Calla-4B",
"soob3123/amoral-gemma3-4B-v2-qat",
"en",
"pt",
"base_model:CEIA-UFG/Gemma-3-Gaia-PT-BR-4b-it",
"base_model:merge:CEIA-UFG/Gemma-3-Gaia-PT-BR-4b-it",
"base_model:soob3123/Veiled-Calla-4B",
"base_model:merge:soob3123/Veiled-Calla-4B",
"base_model:soob3123/amoral-gemma3-4B-v2-qat",
"base_model:merge:soob3123/amoral-gemma3-4B-v2-qat",
"license:gemma",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-19T20:40:56Z |
---
base_model:
- CEIA-UFG/Gemma-3-Gaia-PT-BR-4b-it
- soob3123/Veiled-Calla-4B
- soob3123/amoral-gemma3-4B-v2-qat
library_name: transformers
tags:
- merge
- mergekit
- lazymergekit
- gemma
- text-generation
- conversational
- multilingual
- portuguese
- CEIA-UFG/Gemma-3-Gaia-PT-BR-4b-it
- soob3123/Veiled-Calla-4B
- soob3123/amoral-gemma3-4B-v2-qat
license: gemma
language:
- en
- pt
pipeline_tag: text-generation
---
# 🤖 gama-4b
**gama-4b** is an efficient 4-billion parameter language model, specially optimized for **multilingual** conversation with a focus on **Portuguese and English**. This model combines specialized capabilities through a strategic merge of complementary models.
## 📋 Overview
This model was developed using the **DARE TIES** technique (DARE, Drop And REscale, layered on top of TIES sign-consensus merging), combining specialized models to create a compact and versatile solution for conversational applications in Portuguese and English.
### 🌟 Key Features
- **💬 Bilingual:** Optimized for Brazilian Portuguese and English
- **⚡ Efficient:** Only 4B parameters for fast deployment
- **🔧 Quantized:** QAT for better performance/size
### 🔧 Base Models Used
**gama-4b** is the result of a strategic merge of the following models:
- **[CEIA-UFG/Gemma-3-Gaia-PT-BR-4b-it](https://huggingface.co/CEIA-UFG/Gemma-3-Gaia-PT-BR-4b-it)**
- **[soob3123/Veiled-Calla-4B](https://huggingface.co/soob3123/Veiled-Calla-4B)**
- **[soob3123/amoral-gemma3-4B-v2-qat](https://huggingface.co/soob3123/amoral-gemma3-4B-v2-qat)**
### 🛠️ Merge Tool
The merge was performed using **[LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing)**, facilitating the process of merging language models with advanced configurations.
## ⚙️ Technical Configuration
### Merge Parameters
```yaml
models:
- model: CEIA-UFG/Gemma-3-Gaia-PT-BR-4b-it
parameters:
density: 0.6
weight: 0.34
- model: soob3123/Veiled-Calla-4B
parameters:
density: 0.6
weight: 0.33
- model: soob3123/amoral-gemma3-4B-v2-qat
parameters:
density: 0.6
weight: 0.33
merge_method: dare_ties
base_model: unsloth/gemma-3-4b-it-qat
parameters:
normalize: true
int8_mask: true
dtype: bfloat16
```
### Technical Specifications
- **Architecture:** Gemma-3 4B
- **Merge Method:** DARE TIES
- **Precision:** BFloat16
- **Quantization:** QAT (Quantization Aware Training)
- **Normalization:** Enabled
- **Int8 Mask:** Enabled
- **Languages:** Portuguese (PT-BR) and English
## 💻 How to Use
### Installing Dependencies
```bash
pip install -qU transformers accelerate torch
```
### Basic Usage Example
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers
import torch
# Model configuration
model_name = "rodrigomt/gama-4b"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.bfloat16,
device_map="auto",
trust_remote_code=True
)
# Example in Portuguese
messages_pt = [
{"role": "user", "content": "What is a large language model?"}
]
# Example in English
messages_en = [
{"role": "user", "content": "What is a large language model?"}
]
# Apply chat template
prompt = tokenizer.apply_chat_template(
messages_pt,
tokenize=False,
add_generation_prompt=True
)
# Pipeline configuration
pipeline = transformers.pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
torch_dtype=torch.bfloat16,
device_map="auto",
)
# Text generation
outputs = pipeline(
prompt,
max_new_tokens=256,
do_sample=True,
temperature=0.7,
top_k=50,
top_p=0.95,
repetition_penalty=1.1
)
print(outputs[0]["generated_text"])
```
### Multilingual Usage Example
```python
# Conversation switching languages
conversation = [
{"role": "user", "content": "Hello! How are you?"},
{"role": "assistant", "content": "Hello! I'm doing well, thank you for asking. How can I help you today?"},
{"role": "user", "content": "Can you switch to English?"},
{"role": "assistant", "content": "Of course! I can communicate in both Portuguese and English. How can I help you?"}
]
prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
outputs = pipeline(prompt, max_new_tokens=128, temperature=0.7)
print(outputs[0]["generated_text"])
```
### Advanced Usage Example
```python
# For more granular control over generation
def generate_response(prompt_text, max_tokens=256, temperature=0.7):
    inputs = tokenizer.encode(prompt_text, return_tensors="pt").to(model.device)
    # Guard against tokenizers without a pad token by falling back to EOS
    pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
    attention_mask = inputs.ne(pad_id)
with torch.no_grad():
outputs = model.generate(
inputs,
attention_mask=attention_mask,
max_new_tokens=max_tokens,
do_sample=True,
temperature=temperature,
top_k=50,
top_p=0.95,
repetition_penalty=1.1,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id
)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
return response
# Using the function
response = generate_response("Explain machine learning in simple terms:")
print(response)
```
## ⚠️ System Requirements
### Minimum Configuration
- **RAM:** 16GB
- **VRAM:** 8GB (GPU)
- **Storage:** 20GB available
- **GPU:** RTX 3070 or higher
### Recommended Configuration
- **RAM:** 32GB
- **VRAM:** 16GB (GPU)
- **GPU:** RTX 4070, A4000 or higher
- **CPU:** Modern multi-core processor
## 🔧 Advanced Settings
### Temperature Adjustment
```python
# More creative responses
outputs = pipeline(prompt, temperature=0.9, top_p=0.95)
# More conservative responses
outputs = pipeline(prompt, temperature=0.3, top_k=30)
```
### Repetition Control
```python
# Reduce repetitions
outputs = pipeline(prompt, repetition_penalty=1.2, no_repeat_ngram_size=3)
```
## 📝 License
This model is licensed under the **Gemma License**.
|
mlx-community/Qwen3-14B-4bit-AWQ
|
mlx-community
| 2025-06-22T03:49:43Z | 1,907 | 3 |
mlx
|
[
"mlx",
"safetensors",
"qwen3",
"text-generation",
"conversational",
"base_model:Qwen/Qwen3-14B",
"base_model:finetune:Qwen/Qwen3-14B",
"license:apache-2.0",
"region:us"
] |
text-generation
| 2025-05-06T15:22:57Z |
---
library_name: mlx
license: apache-2.0
license_link: https://huggingface.co/Qwen/Qwen3-14B/blob/main/LICENSE
pipeline_tag: text-generation
base_model: Qwen/Qwen3-14B
tags:
- mlx
---
# mlx-community/Qwen3-14B-4bit-AWQ
This model [mlx-community/Qwen3-14B-4bit-AWQ](https://huggingface.co/mlx-community/Qwen3-14B-4bit-AWQ) was
converted to MLX format from [Qwen/Qwen3-14B](https://huggingface.co/Qwen/Qwen3-14B)
using mlx-lm version **0.25.2**.
AWQ Parameters: `--bits 4 --group-size 64 --embed-bits 4 --embed-group-size 32 --num-samples 256 --sequence-length 1024 --n-grid 50`
## Use with mlx
```bash
pip install mlx-lm
```
```python
from mlx_lm import load, generate
model, tokenizer = load("mlx-community/Qwen3-14B-4bit-AWQ")
prompt = "hello"
if tokenizer.chat_template is not None:
messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
messages, add_generation_prompt=True
)
response = generate(model, tokenizer, prompt=prompt, verbose=True)
```
|
Riyan123/Llama-3.2-3B-it-chat-hindi-myra-finetuned
|
Riyan123
| 2025-06-22T03:41:03Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"conversational",
"arxiv:1910.09700",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T03:38:49Z |
---
library_name: transformers
tags: []
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
Nguyenhhh/Lamma-700M
|
Nguyenhhh
| 2025-06-22T03:40:55Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"unsloth",
"arxiv:1910.09700",
"endpoints_compatible",
"region:us"
] | null | 2025-06-22T03:08:11Z |
---
library_name: transformers
tags:
- unsloth
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
phospho-app/gc1724-ACT-ttt-c1-square-prbtd
|
phospho-app
| 2025-06-22T03:38:27Z | 0 | 0 | null |
[
"safetensors",
"phosphobot",
"act",
"region:us"
] | null | 2025-06-22T01:22:16Z |
---
tags:
- phosphobot
- act
task_categories:
- robotics
---
# act Model - phospho Training Pipeline
## This model was trained using **phospho**.
Training was successful. Try it out on your robot!
## Training parameters:
- **Dataset**: [gc1724/ttt-c1-square](https://huggingface.co/datasets/gc1724/ttt-c1-square)
- **Wandb run URL**: None
- **Epochs**: None
- **Batch size**: 60
- **Training steps**: 8000
📖 **Get Started**: [docs.phospho.ai](https://docs.phospho.ai?utm_source=huggingface_readme)
🤖 **Get your robot**: [robots.phospho.ai](https://robots.phospho.ai?utm_source=huggingface_readme)
|
paro-aarti-video-original-clip/NEW.VIDEO.Paro.Aarti.viral.video.Link.viral.On.Social.Media.Official.telegram.link
|
paro-aarti-video-original-clip
| 2025-06-22T03:38:05Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T03:37:32Z |
<a data-target="animated-image.originalLink" rel="nofollow" href="https://tinyurl.com/npw8at8u?Njei"><img data-target="animated-image.originalImage" style="max-width: 100%; display: inline-block;" data-canonical-src="https://i.imgur.com/dJHk4Zq.gif" alt="WATCH Videos" src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif"></a>
|
dslighfdsl/Llama-3.1-8B-Instruct-Baselines-SFT-webshop
|
dslighfdsl
| 2025-06-22T03:34:30Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"generated_from_trainer",
"open-r1",
"trl",
"sft",
"conversational",
"dataset:webshop",
"base_model:meta-llama/Llama-3.1-8B-Instruct",
"base_model:finetune:meta-llama/Llama-3.1-8B-Instruct",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T03:14:10Z |
---
base_model: meta-llama/Llama-3.1-8B-Instruct
datasets: webshop
library_name: transformers
model_name: Llama-3.1-8B-Instruct-Baselines-SFT-webshop
tags:
- generated_from_trainer
- open-r1
- trl
- sft
licence: license
---
# Model Card for Llama-3.1-8B-Instruct-Baselines-SFT-webshop
This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on the [webshop](https://huggingface.co/datasets/webshop) dataset.
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="dslighfdsl/Llama-3.1-8B-Instruct-Baselines-SFT-webshop", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/pengliangji2023-carnegie-mellon-university/huggingface/runs/ijrq2cvu)
This model was trained with SFT.
### Framework versions
- TRL: 0.15.2
- Transformers: 4.50.0.dev0
- Pytorch: 2.5.1
- Datasets: 3.3.2
- Tokenizers: 0.21.0
## Citations
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
dslighfdsl/Llama-3.1-8B-Instruct-Baselines-SFT-alfworld
|
dslighfdsl
| 2025-06-22T03:29:07Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"generated_from_trainer",
"open-r1",
"trl",
"sft",
"conversational",
"dataset:alfworld",
"base_model:meta-llama/Llama-3.1-8B-Instruct",
"base_model:finetune:meta-llama/Llama-3.1-8B-Instruct",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T03:11:10Z |
---
base_model: meta-llama/Llama-3.1-8B-Instruct
datasets: alfworld
library_name: transformers
model_name: Llama-3.1-8B-Instruct-Baselines-SFT-alfworld
tags:
- generated_from_trainer
- open-r1
- trl
- sft
licence: license
---
# Model Card for Llama-3.1-8B-Instruct-Baselines-SFT-alfworld
This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on the [alfworld](https://huggingface.co/datasets/alfworld) dataset.
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="dslighfdsl/Llama-3.1-8B-Instruct-Baselines-SFT-alfworld", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/pengliangji2023-carnegie-mellon-university/huggingface/runs/raorgpmg)
This model was trained with SFT.
### Framework versions
- TRL: 0.15.2
- Transformers: 4.50.0.dev0
- Pytorch: 2.5.1
- Datasets: 3.3.2
- Tokenizers: 0.21.0
## Citations
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
hdong0/deepseek-Qwen-7B-batch-mix-Open-R1-GRPO_deepscaler_1000steps_lr1e-6_kl1e-3_acc_seq_end_mask_
|
hdong0
| 2025-06-22T03:28:57Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"qwen2bm",
"text-generation",
"conversational",
"custom_code",
"arxiv:1910.09700",
"autotrain_compatible",
"region:us"
] |
text-generation
| 2025-06-21T14:15:26Z |
---
library_name: transformers
tags: []
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
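The authors have not provided an official snippet yet. Until they do, the following is a minimal, hypothetical sketch (not from the model authors), assuming the checkpoint loads as a standard 🤗 Transformers causal LM; `trust_remote_code=True` is used because the repository carries the `custom_code` tag:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical usage sketch; the repo id is taken from this card's metadata.
model_id = "hdong0/deepseek-Qwen-7B-batch-mix-Open-R1-GRPO_deepscaler_1000steps_lr1e-6_kl1e-3_acc_seq_end_mask_"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, torch_dtype="auto", device_map="auto"
)

messages = [{"role": "user", "content": "What is 17 * 24?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```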
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
|
luckeciano/Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskSentence-1e-6_3949
|
luckeciano
| 2025-06-22T03:28:38Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"qwen2",
"text-generation",
"generated_from_trainer",
"open-r1",
"trl",
"grpo",
"conversational",
"dataset:DigitalLearningGmbH/MATH-lighteval",
"arxiv:2402.03300",
"base_model:Qwen/Qwen2.5-Math-7B",
"base_model:finetune:Qwen/Qwen2.5-Math-7B",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-21T23:40:16Z |
---
base_model: Qwen/Qwen2.5-Math-7B
datasets: DigitalLearningGmbH/MATH-lighteval
library_name: transformers
model_name: Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskSentence-1e-6_3949
tags:
- generated_from_trainer
- open-r1
- trl
- grpo
licence: license
---
# Model Card for Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskSentence-1e-6_3949
This model is a fine-tuned version of [Qwen/Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B) on the [DigitalLearningGmbH/MATH-lighteval](https://huggingface.co/datasets/DigitalLearningGmbH/MATH-lighteval) dataset.
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="luckeciano/Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskSentence-1e-6_3949", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/max-ent-llms/PolicyGradientStability/runs/nscskn3l)
This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
### Framework versions
- TRL: 0.16.0.dev0
- Transformers: 4.49.0
- Pytorch: 2.6.0
- Datasets: 3.4.1
- Tokenizers: 0.21.1
## Citations
Cite GRPO as:
```bibtex
@article{zhihong2024deepseekmath,
title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
year = 2024,
eprint = {arXiv:2402.03300},
}
```
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
New-tutorial-Sapna-Shah-18-Viral-Videos-tv/Original.Full.Clip.shah.sapna.Viral.Video.Leaks.Official.telegram.link
|
New-tutorial-Sapna-Shah-18-Viral-Videos-tv
| 2025-06-22T03:26:29Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T03:25:42Z |
<a data-target="animated-image.originalLink" rel="nofollow" href="https://tinyurl.com/npw8at8u?Njei"><img data-target="animated-image.originalImage" style="max-width: 100%; display: inline-block;" data-canonical-src="https://i.imgur.com/dJHk4Zq.gif" alt="WATCH Videos" src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif"></a>
|
dslighfdsl/Llama-3.1-8B-Instruct-Baselines-SFT
|
dslighfdsl
| 2025-06-22T03:24:03Z | 0 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"generated_from_trainer",
"open-r1",
"trl",
"sft",
"conversational",
"dataset:sciworld",
"base_model:meta-llama/Llama-3.1-8B-Instruct",
"base_model:finetune:meta-llama/Llama-3.1-8B-Instruct",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-22T02:53:57Z |
---
base_model: meta-llama/Llama-3.1-8B-Instruct
datasets: sciworld
library_name: transformers
model_name: Llama-3.1-8B-Instruct-Baselines-SFT
tags:
- generated_from_trainer
- open-r1
- trl
- sft
licence: license
---
# Model Card for Llama-3.1-8B-Instruct-Baselines-SFT
This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on the [sciworld](https://huggingface.co/datasets/sciworld) dataset.
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="dslighfdsl/Llama-3.1-8B-Instruct-Baselines-SFT", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/pengliangji2023-carnegie-mellon-university/huggingface/runs/59hifle7)
This model was trained with SFT.
### Framework versions
- TRL: 0.15.2
- Transformers: 4.50.0.dev0
- Pytorch: 2.5.1
- Datasets: 3.3.2
- Tokenizers: 0.21.0
## Citations
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
New-videos-Sapna-Shah-18-Viral-Video-Link/Watch.Sapna.Shah.Viral.Video.Original
|
New-videos-Sapna-Shah-18-Viral-Video-Link
| 2025-06-22T03:23:54Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T03:23:37Z |
<animated-image data-catalyst=""><a href="https://tinyurl.com/5ye5v3bc?dfhgKasbonStudiosdfg" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Foo" data-canonical-src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
onnx-community/rad-dino-ONNX
|
onnx-community
| 2025-06-22T03:19:46Z | 0 | 0 |
transformers.js
|
[
"transformers.js",
"onnx",
"dinov2",
"image-feature-extraction",
"base_model:microsoft/rad-dino",
"base_model:quantized:microsoft/rad-dino",
"region:us"
] |
image-feature-extraction
| 2025-06-22T03:19:34Z |
---
library_name: transformers.js
base_model:
- microsoft/rad-dino
---
# rad-dino (ONNX)
This is an ONNX version of [microsoft/rad-dino](https://huggingface.co/microsoft/rad-dino). It was automatically converted and uploaded using [this space](https://huggingface.co/spaces/onnx-community/convert-to-onnx).
|
WariHima/VoiceSpeechMaker-pretrain
|
WariHima
| 2025-06-22T03:17:02Z | 0 | 0 | null |
[
"ja",
"base_model:litagin/Style-Bert-VITS2-2.0-base-JP-Extra",
"base_model:finetune:litagin/Style-Bert-VITS2-2.0-base-JP-Extra",
"license:agpl-3.0",
"region:us"
] | null | 2025-06-22T03:08:47Z |
---
license: agpl-3.0
language:
- ja
base_model:
- litagin/Style-Bert-VITS2-2.0-base-JP-Extra
---
This is a pretrained model for VoiceSpeechMaker.
It was created by performing additional pre-training on top of [litagin/Style-Bert-VITS2-2.0-base-JP-Extra](https://huggingface.co/litagin/Style-Bert-VITS2-2.0-base-JP-Extra).
The dataset used for training is the [JVNV corpus](https://sites.google.com/site/shinnosuketakamichi/research-topics/jvnv_corpus).
It was trained for 10 epochs (22,000 steps).
Training took 4 hours on an RTX 3060 12GB.
|
goodcasper/kvasir_seg_rtdetrv2_r18_test_fps
|
goodcasper
| 2025-06-22T03:13:02Z | 0 | 0 |
transformers
|
[
"transformers",
"tensorboard",
"safetensors",
"rt_detr_v2",
"object-detection",
"generated_from_trainer",
"base_model:PekingU/rtdetr_v2_r18vd",
"base_model:finetune:PekingU/rtdetr_v2_r18vd",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] |
object-detection
| 2025-06-22T03:08:58Z |
---
library_name: transformers
license: apache-2.0
base_model: PekingU/rtdetr_v2_r18vd
tags:
- generated_from_trainer
model-index:
- name: kvasir_seg_rtdetrv2_r18_test_fps
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# kvasir_seg_rtdetrv2_r18_test_fps
This model is a fine-tuned version of [PekingU/rtdetr_v2_r18vd](https://huggingface.co/PekingU/rtdetr_v2_r18vd) on an unspecified dataset (presumably Kvasir-SEG, given the model name and the single `Polyp` class).
It achieves the following results on the evaluation set:
- Loss: 5.7736
- Map: 0.6786
- Map 50: 0.865
- Map 75: 0.7285
- Map Small: 0.0
- Map Medium: 0.5729
- Map Large: 0.6933
- Mar 1: 0.6607
- Mar 10: 0.8706
- Mar 100: 0.9114
- Mar Small: 0.0
- Mar Medium: 0.79
- Mar Large: 0.922
- Map Polyp: 0.6786
- Mar 100 Polyp: 0.9114
## Model description
More information needed
## Intended uses & limitations
More information needed
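No usage example is included yet; below is a minimal, hypothetical inference sketch (not from the model author), assuming the checkpoint works with the standard 🤗 Transformers `object-detection` pipeline:

```python
from transformers import pipeline

# Hypothetical usage sketch; the repo id is taken from this card's title.
detector = pipeline("object-detection", model="goodcasper/kvasir_seg_rtdetrv2_r18_test_fps")

# "endoscopy_frame.jpg" is a placeholder path to a local image.
for det in detector("endoscopy_frame.jpg", threshold=0.5):
    print(det["label"], round(det["score"], 3), det["box"])
```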
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 1
- seed: 42
- optimizer: AdamW (`adamw_torch`) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 300
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Map | Map 50 | Map 75 | Map Small | Map Medium | Map Large | Mar 1 | Mar 10 | Mar 100 | Mar Small | Mar Medium | Mar Large | Map Polyp | Mar 100 Polyp |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:----------:|:---------:|:------:|:------:|:-------:|:---------:|:----------:|:---------:|:---------:|:-------------:|
| 222.0855 | 1.0 | 100 | 27.8979 | 0.2324 | 0.3276 | 0.2381 | 0.0 | 0.0033 | 0.2491 | 0.4327 | 0.6583 | 0.7569 | 0.0 | 0.47 | 0.775 | 0.2324 | 0.7569 |
| 21.4558 | 2.0 | 200 | 8.5908 | 0.4453 | 0.6029 | 0.4705 | 0.0 | 0.2753 | 0.4596 | 0.6161 | 0.7815 | 0.872 | 0.0 | 0.71 | 0.8845 | 0.4453 | 0.872 |
| 11.9198 | 3.0 | 300 | 7.3897 | 0.559 | 0.7445 | 0.5928 | 0.0 | 0.3981 | 0.5748 | 0.6156 | 0.8218 | 0.8806 | 0.0 | 0.74 | 0.892 | 0.559 | 0.8806 |
| 10.1107 | 4.0 | 400 | 6.7691 | 0.5087 | 0.691 | 0.5261 | 0.0 | 0.258 | 0.53 | 0.6185 | 0.8047 | 0.8744 | 0.0 | 0.61 | 0.892 | 0.5087 | 0.8744 |
| 8.9974 | 5.0 | 500 | 6.1887 | 0.6247 | 0.7927 | 0.6818 | 0.0 | 0.51 | 0.6464 | 0.6682 | 0.8346 | 0.8872 | 0.0 | 0.69 | 0.9015 | 0.6247 | 0.8872 |
| 8.4837 | 6.0 | 600 | 6.0117 | 0.6313 | 0.82 | 0.6738 | 0.0 | 0.4952 | 0.6465 | 0.627 | 0.8502 | 0.9028 | 0.0 | 0.74 | 0.9155 | 0.6313 | 0.9028 |
| 7.9083 | 7.0 | 700 | 6.0504 | 0.6328 | 0.8234 | 0.6877 | 0.0 | 0.5697 | 0.6459 | 0.6412 | 0.8545 | 0.891 | 0.0 | 0.77 | 0.9015 | 0.6328 | 0.891 |
| 7.4364 | 8.0 | 800 | 5.9616 | 0.6493 | 0.8454 | 0.6945 | 0.0 | 0.6155 | 0.6611 | 0.6445 | 0.8635 | 0.9033 | 0.0 | 0.78 | 0.914 | 0.6493 | 0.9033 |
| 7.2255 | 9.0 | 900 | 5.7790 | 0.6767 | 0.8628 | 0.7297 | 0.0 | 0.5687 | 0.6914 | 0.663 | 0.8739 | 0.9047 | 0.0 | 0.79 | 0.915 | 0.6767 | 0.9047 |
| 6.9854 | 10.0 | 1000 | 5.7736 | 0.6786 | 0.865 | 0.7285 | 0.0 | 0.5729 | 0.6933 | 0.6607 | 0.8706 | 0.9114 | 0.0 | 0.79 | 0.922 | 0.6786 | 0.9114 |
### Framework versions
- Transformers 4.53.0.dev0
- Pytorch 2.7.1+cu126
- Datasets 3.6.0
- Tokenizers 0.21.1
|
Hot-New-Clip-Sapna-Shah-18-Viral-Video/FULL.VIDEO.Sapna.Shah.Viral.Video.Tutorial.Official
|
Hot-New-Clip-Sapna-Shah-18-Viral-Video
| 2025-06-22T03:12:06Z | 0 | 0 | null |
[
"region:us"
] | null | 2025-06-22T03:11:47Z |
<animated-image data-catalyst=""><a href="https://tinyurl.com/5ye5v3bc?dfhgKasbonStudiosdfg" rel="nofollow" data-target="animated-image.originalLink"><img src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" alt="Foo" data-canonical-src="https://static.wixstatic.com/media/b249f9_adac8f70fb3f45b88691696c77de18f3~mv2.gif" style="max-width: 100%; display: inline-block;" data-target="animated-image.originalImage"></a>
|
aersply/meta-face-lora
|
aersply
| 2025-06-22T03:04:25Z | 0 | 0 |
diffusers
|
[
"diffusers",
"text-to-image",
"lora",
"template:diffusion-lora",
"base_model:black-forest-labs/FLUX.1-dev",
"base_model:adapter:black-forest-labs/FLUX.1-dev",
"region:us"
] |
text-to-image
| 2025-06-22T03:04:16Z |
---
tags:
- text-to-image
- lora
- diffusers
- template:diffusion-lora
widget:
- text: '-'
output:
url: images/NameYourCharacterHere_05_LIGHTING_00013_.png
base_model: black-forest-labs/FLUX.1-dev
instance_prompt: Meta1
---
# meta
<Gallery />
## Model description
This is a LoRA for the `Meta1` face concept, trained for use with FLUX.1-dev.
## Trigger words
You should use `Meta1` to trigger the image generation.
## Download model
Weights for this model are available in Safetensors format.
[Download](/aersply/meta-face-lora/tree/main) them in the Files & versions tab.
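## Use with 🧨 diffusers

The card does not ship a usage snippet, so here is a minimal, hypothetical loading sketch (not from the model author), assuming the weights work with the standard Diffusers `FluxPipeline` LoRA API and that you have access to the gated FLUX.1-dev base model:

```python
import torch
from diffusers import FluxPipeline

# Hypothetical usage sketch; prompt text is illustrative only.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")
pipe.load_lora_weights("aersply/meta-face-lora")

# `Meta1` is the trigger word documented above.
image = pipe(
    "Meta1, close-up portrait, soft studio lighting",
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("meta1.png")
```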
|
CriteriaPO/llama3.2-3b-orpo-coarse-2e
|
CriteriaPO
| 2025-06-22T02:59:10Z | 3 | 0 |
transformers
|
[
"transformers",
"safetensors",
"llama",
"text-generation",
"generated_from_trainer",
"trl",
"dpo",
"conversational",
"arxiv:2305.18290",
"base_model:meta-llama/Llama-3.2-3B",
"base_model:finetune:meta-llama/Llama-3.2-3B",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
] |
text-generation
| 2025-06-18T14:09:16Z |
---
base_model: meta-llama/Llama-3.2-3B
library_name: transformers
model_name: llama3.2-3b-orpo-coarse-2e
tags:
- generated_from_trainer
- trl
- dpo
licence: license
---
# Model Card for llama3.2-3b-orpo-coarse-2e
This model is a fine-tuned version of [meta-llama/Llama-3.2-3B](https://huggingface.co/meta-llama/Llama-3.2-3B).
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="CriteriaPO/llama3.2-3b-orpo-coarse-2e", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/bborges/CriteriaPreferences/runs/6pjlq7z8)
This model was trained with DPO, a method introduced in [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://huggingface.co/papers/2305.18290).
### Framework versions
- TRL: 0.12.2
- Transformers: 4.46.3
- Pytorch: 2.1.2+cu121
- Datasets: 3.1.0
- Tokenizers: 0.20.3
## Citations
Cite DPO as:
```bibtex
@inproceedings{rafailov2023direct,
title = {{Direct Preference Optimization: Your Language Model is Secretly a Reward Model}},
author = {Rafael Rafailov and Archit Sharma and Eric Mitchell and Christopher D. Manning and Stefano Ermon and Chelsea Finn},
year = 2023,
booktitle = {Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023},
url = {http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-Abstract-Conference.html},
editor = {Alice Oh and Tristan Naumann and Amir Globerson and Kate Saenko and Moritz Hardt and Sergey Levine},
}
```
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```
|
ReadyArt/Broken-Tutu-24B-Unslop-v2.0
|
ReadyArt
| 2025-06-22T02:47:11Z | 288 | 19 | null |
[
"safetensors",
"mistral",
"nsfw",
"explicit",
"roleplay",
"unaligned",
"ERP",
"Erotic",
"Horror",
"Violence",
"text-generation",
"conversational",
"en",
"base_model:mistralai/Mistral-Small-24B-Instruct-2501",
"base_model:finetune:mistralai/Mistral-Small-24B-Instruct-2501",
"license:apache-2.0",
"region:us"
] |
text-generation
| 2025-06-09T04:53:36Z |
---
license: apache-2.0
language:
- en
base_model:
- mistralai/Mistral-Small-24B-Instruct-2501
base_model_relation: finetune
pipeline_tag: text-generation
tags:
- nsfw
- explicit
- roleplay
- unaligned
- ERP
- Erotic
- Horror
- Violence
---
<style>
strong {
color: #FF1493 !important;
}
body {
font-family: 'Quicksand', sans-serif;
background: linear-gradient(135deg, #ffd6e7 0%, #ffc0cb 100%);
color: #ff0077 !important;
text-shadow: 0 0 3px rgba(255, 192, 203, 0.7);
margin: 0;
padding: 20px;
transition: all 0.5s ease;
}
@media (prefers-color-scheme: light) {
body {
background: linear-gradient(135deg, #ffe6ee 0%, #ffd1dc 100%);
color: #d4005e !important;
text-shadow: 0 0 3px rgba(255, 255, 255, 0.7);
}
}
.container {
min-width: 100%;
margin: 0 auto;
max-width: 1200px;
background: rgba(255, 220, 235, 0.95);
border-radius: 12px;
padding: 30px;
box-shadow: 0 0 20px rgba(255, 105, 180, 0.1);
border: 1px solid rgba(255, 20, 147, 0.2);
position: relative;
overflow: hidden;
}
.container::before {
content: '';
position: absolute;
top: -1px;
left: -1px;
right: -1px;
bottom: -1px;
border: 1px solid rgba(255, 105, 180, 0.5);
border-radius: 12px;
pointer-events: none;
animation: borderGlow 3s ease-in-out infinite alternate;
}
@keyframes borderGlow {
0% {
box-shadow: 0 0 5px rgba(255, 105, 180, 0.3);
border-color: rgba(255, 105, 180, 0.5);
}
50% {
box-shadow: 0 0 15px rgba(255, 0, 127, 0.3);
border-color: rgba(255, 0, 127, 0.5);
}
100% {
box-shadow: 0 0 5px rgba(255, 105, 180, 0.3);
border-color: rgba(255, 105, 180, 0.5);
}
}
.header {
text-align: center;
margin-bottom: 30px;
position: relative;
}
.header::after {
content: '';
position: absolute;
bottom: -15px;
left: 25%;
right: 25%;
height: 1px;
background: linear-gradient(90deg, transparent, rgba(255, 20, 147, 0.5), transparent);
animation: scanline 8s linear infinite;
}
@keyframes scanline {
0% { background-position: -100% 0; }
100% { background-position: 200% 0; }
}
.model-name {
color: #ff1493;
font-size: 2.5em;
text-shadow: 0 0 15px rgba(255, 20, 147, 0.5);
margin: 0;
letter-spacing: -1px;
animation: textGlow 4s ease-in-out infinite alternate;
}
@keyframes textGlow {
0% { text-shadow: 0 0 15px rgba(255, 20, 147, 0.5); }
50% { text-shadow: 0 0 20px rgba(255, 0, 127, 0.5); }
100% { text-shadow: 0 0 15px rgba(255, 20, 147, 0.5); }
}
.subtitle {
color: #ff69b4;
font-size: 1.2em;
margin-top: 10px;
animation: subtitleFade 6s ease-in-out infinite;
}
@keyframes subtitleFade {
0%, 100% { opacity: 0.8; }
50% { opacity: 1; }
}
.waifu-container {
margin: 20px -30px;
width: calc(100% + 60px);
overflow: hidden;
border-radius: 8px;
border: 1px solid rgba(255, 105, 180, 0.3);
position: relative;
}
.waifu-container::before {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: linear-gradient(45deg,
rgba(255, 105, 180, 0.1) 0%,
transparent 20%,
transparent 80%,
rgba(255, 0, 127, 0.1) 100%);
pointer-events: none;
animation: gradientSlide 10s linear infinite;
}
@keyframes gradientSlide {
0% { background-position: 0% 0%; }
100% { background-position: 100% 100%; }
}
.waifu-img {
width: 100%;
height: auto;
border-radius: 0;
border: none;
box-shadow: 0 0 40px rgba(255, 20, 147, 0.2);
transition: transform 0.5s ease;
}
.waifu-img:hover {
transform: scale(1.01);
}
.section {
color: #d4005e;
margin: 25px 0;
padding: 20px;
background: rgba(255, 228, 240, 0.9);
border-radius: 8px;
border: 1px solid rgba(255, 105, 180, 0.15);
position: relative;
transition: all 0.3s ease;
}
.section:hover {
border-color: rgba(255, 0, 127, 0.3);
box-shadow: 0 0 15px rgba(255, 20, 147, 0.1);
}
.section::before {
content: '';
position: absolute;
top: -1px;
left: -1px;
right: -1px;
bottom: -1px;
border: 1px solid rgba(255, 105, 180, 0.3);
border-radius: 8px;
pointer-events: none;
animation: sectionPulse 5s ease-in-out infinite;
}
@keyframes sectionPulse {
0%, 100% { opacity: 0.7; }
50% { opacity: 0.3; }
}
.section-title {
color: #ff1493;
font-size: 1.8em;
margin-top: 0;
text-shadow: 0 0 5px rgba(255, 20, 147, 0.3);
position: relative;
display: inline-block;
}
.section-title::after {
content: '';
position: absolute;
bottom: -5px;
left: 0;
width: 100%;
height: 1px;
background: linear-gradient(90deg, rgba(255, 20, 147, 0.5), rgba(255, 0, 127, 0.5));
transform: scaleX(0);
transform-origin: left;
transition: transform 0.3s ease;
}
.section:hover .section-title::after {
transform: scaleX(1);
}
.quant-links {
display: grid;
grid-template-columns: repeat(1, 1fr);
gap: 15px;
margin: 20px 0;
}
.link-card {
padding: 15px;
background: rgba(255, 228, 240, 0.95);
border-radius: 8px;
transition: all 0.3s ease;
border: 1px solid rgba(255, 105, 180, 0.1);
position: relative;
overflow: hidden;
}
.link-card::before {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
height: 2px;
background: linear-gradient(90deg, rgba(255, 20, 147, 0.5), rgba(255, 0, 127, 0.5));
animation: cardScan 4s linear infinite;
}
@keyframes cardScan {
0% { transform: translateX(-100%); }
100% { transform: translateX(100%); }
}
.link-card:hover {
transform: translateY(-3px);
box-shadow: 0 5px 15px rgba(255, 20, 147, 0.2);
border-color: rgba(255, 0, 127, 0.3);
}
.link-card h3 {
margin-top: 0;
color: #d4005e !important;
}
.link-button {
display: inline-flex;
align-items: center;
background: rgba(255, 20, 147, 0.1);
color: #d4005e !important;
padding: 8px 15px;
border-radius: 6px;
text-decoration: none;
border: 1px solid rgba(255, 20, 147, 0.3);
margin: 5px 0;
transition: all 0.3s ease;
font-size: 0.95em;
position: relative;
overflow: hidden;
}
.link-button::before {
content: '';
position: absolute;
top: 0;
left: -100%;
width: 100%;
height: 100%;
background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.2), transparent);
transition: all 0.5s ease;
}
.link-button:hover {
background: rgba(255, 20, 147, 0.2);
border-color: rgba(255, 20, 147, 0.5);
transform: translateY(-2px);
box-shadow: 0 4px 12px rgba(255, 20, 147, 0.2);
}
.link-button:hover::before {
left: 100%;
}
.link-button::after {
content: '→';
margin-left: 8px;
opacity: 0.7;
transition: all 0.3s ease;
}
.link-button:hover::after {
transform: translateX(3px);
opacity: 1;
}
.button-group {
display: flex;
flex-wrap: wrap;
gap: 10px;
margin: 15px 0;
}
.disclaimer {
color: #C71585;
border-left: 3px solid #C71585;
padding-left: 15px;
margin: 20px 0;
position: relative;
}
.disclaimer::before {
content: '⚠️';
position: absolute;
left: -10px;
top: 0;
transform: translateX(-100%);
animation: pulse 2s ease-in-out infinite;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.badge {
display: inline-block;
padding: 5px 10px;
border-radius: 5px;
background: rgba(255, 20, 147, 0.1);
border: 1px solid #ff1493;
margin: 5px;
font-size: 0.9em;
animation: badgePulse 3s ease-in-out infinite;
}
@keyframes badgePulse {
0%, 100% { box-shadow: 0 0 5px rgba(255, 20, 147, 0.3); }
50% { box-shadow: 0 0 10px rgba(255, 20, 147, 0.5); }
}
/* Light mode adjustments */
@media (prefers-color-scheme: light) {
.container {
background: rgba(255, 240, 245, 0.95);
border-color: rgba(200, 0, 100, 0.3);
}
.model-name, .section-title, .subtitle {
color: #d4005e;
text-shadow: 0 0 5px rgba(255, 0, 127, 0.3);
}
.section {
background: rgba(255, 240, 245, 0.9);
border-color: rgba(200, 0, 100, 0.2);
color: #8b005d;
}
.section p,
.section ul li,
.section > p > strong {
color: #d4005e !important;
}
.link-card {
background: rgba(255, 228, 240, 0.95);
border-color: rgba(200, 0, 100, 0.2);
}
.link-card h3 {
color: #8b005d !important;
}
.link-button {
background: rgba(200, 0, 100, 0.1);
color: #8b005d !important;
border-color: rgba(200, 0, 100, 0.3);
}
.link-button:hover {
background: rgba(200, 0, 100, 0.2);
border-color: rgba(200, 0, 100, 0.5);
}
.disclaimer {
color: #d4005e;
border-color: #d4005e;
}
.badge {
border-color: #d4005e;
background: rgba(200, 0, 100, 0.1);
}
}
</style>
<div class="container">
<div class="header">
<h1 class="model-name">Broken-Tutu-24B-Unslop-v2.0</h1>
</div>
<div class="waifu-container">
<img src="./tutu.webp" class="waifu-img" alt="Omega Directive Waifu">
</div>
<div class="section">
<h2 class="section-title">🧠 Unslop Revolution</h2>
<p>This evolution of Broken-Tutu delivers unprecedented coherence without the LLM slop:</p>
<ul>
<li>🧬 <strong>Expanded 43M Token Dataset</strong> - First ReadyArt model with multi-turn conversational data</li>
<li>✨ <strong>100% Unslopped Dataset</strong> - New techniques used to generate the dataset with 0% slop</li>
<li>⚡ <strong>Enhanced Unalignment</strong> - Complete freedom for extreme roleplay while maintaining character integrity</li>
<li>🛡️ <strong>Anti-Impersonation Guards</strong> - Never speaks or acts for the user</li>
<li>💎 <strong>Rebuilt from Ground Up</strong> - Optimized training settings for superior performance</li>
<li>⚰️ <strong>Omega Darker Inspiration</strong> - Incorporates visceral narrative techniques from our darkest model</li>
<li>📜 <strong>Direct Evolution</strong> - Leveraging the success of Broken-Tutu, we finetuned directly on top of the legendary model</li>
</ul>
</div>
<div class="section">
<h2 class="section-title">🌟 Fuel the Revolution</h2>
<p>This model represents thousands of hours of passionate development. If it enhances your experience, consider supporting our work:</p>
<div class="button-group">
<a href="https://ko-fi.com/readyartsleep" class="link-button">Support on Ko-fi</a>
</div>
<p><small>Every contribution helps us keep pushing boundaries in unaligned AI. Thank you for being part of the revolution!</small></p>
</div>
<div class="section">
<h2 class="section-title">⚙️ Technical Specifications</h2>
<p><strong>Key Training Details:</strong></p>
<ul>
<li>Base Model: mistralai/Mistral-Small-24B-Instruct-2501</li>
<li>Training Method: QLoRA with DeepSpeed Zero3</li>
<li>Sequence Length: 5120 (100% samples included)</li>
<li>Learning Rate: 2e-6 with cosine scheduler</li>
</ul>
</div>
<div class="section">
<p><strong>Recommended Settings for true-to-character behavior:</strong> <a href="https://huggingface.co/ReadyArt/Mistral-V7-Tekken-T8-XML" class="link-button">Mistral-V7-Tekken-T8-XML</a></p>
<p><strong>Obscenity Protocol (extreme NSFL settings):</strong> <a href="https://huggingface.co/ReadyArt/Mistral-V7-Tekken-T8-OP-XML" class="link-button">Mistral-V7-Tekken-T8-OP-XML</a></p>
<div class="quant-links">
<div class="link-card">
<h3>GGUF</h3>
<div class="button-group" style="display: grid; grid-template-columns: repeat(4, 1fr); gap: 10px;">
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q2_K.gguf" class="link-button">Q2_K (9.0GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q3_K_S.gguf" class="link-button">Q3_K_S (10.5GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q3_K_M.gguf" class="link-button">Q3_K_M (11.6GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q3_K_L.gguf" class="link-button">Q3_K_L (12.5GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.IQ4_XS.gguf" class="link-button">IQ4_XS (13.0GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q4_K_S.gguf" class="link-button">Q4_K_S (13.6GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q4_K_M.gguf" class="link-button">Q4_K_M (14.4GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q5_K_S.gguf" class="link-button">Q5_K_S (16.4GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q5_K_M.gguf" class="link-button">Q5_K_M (16.9GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q6_K.gguf" class="link-button">Q6_K (19.4GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.Q8_0.gguf" class="link-button">Q8_0 (25.2GB)</a>
</div>
<p><small>Notes: Q4_K_S/Q4_K_M recommended for speed/quality balance. Q6_K for high quality. Q8_0 best quality.</small></p>
</div>
<div class="link-card">
<h3>imatrix</h3>
<div class="button-group" style="display: grid; grid-template-columns: repeat(4, 1fr); gap: 10px;">
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ1_S.gguf" class="link-button">IQ1_S (5.4GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ1_M.gguf" class="link-button">IQ1_M (5.9GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ2_XXS.gguf" class="link-button">IQ2_XXS (6.6GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ2_XS.gguf" class="link-button">IQ2_XS (7.3GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ2_S.gguf" class="link-button">IQ2_S (7.6GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ2_M.gguf" class="link-button">IQ2_M (8.2GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q2_K_S.gguf" class="link-button">Q2_K_S (8.4GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q2_K.gguf" class="link-button">Q2_K (9.0GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ3_XXS.gguf" class="link-button">IQ3_XXS (9.4GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ3_XS.gguf" class="link-button">IQ3_XS (10.0GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q3_K_S.gguf" class="link-button">Q3_K_S (10.5GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ3_S.gguf" class="link-button">IQ3_S (10.5GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ3_M.gguf" class="link-button">IQ3_M (10.8GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q3_K_M.gguf" class="link-button">Q3_K_M (11.6GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q3_K_L.gguf" class="link-button">Q3_K_L (12.5GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-IQ4_XS.gguf" class="link-button">IQ4_XS (12.9GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q4_0.gguf" class="link-button">Q4_0 (13.6GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q4_K_S.gguf" class="link-button">Q4_K_S (13.6GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q4_K_M.gguf" class="link-button">Q4_K_M (14.4GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q4_1.gguf" class="link-button">Q4_1 (15.0GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q5_K_S.gguf" class="link-button">Q5_K_S (16.4GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q5_K_M.gguf" class="link-button">Q5_K_M (16.9GB)</a>
<a href="https://huggingface.co/mradermacher/Broken-Tutu-24B-Unslop-v2.0-i1-GGUF/resolve/main/Broken-Tutu-24B-Unslop-v2.0.i1-Q6_K.gguf" class="link-button">Q6_K (19.4GB)</a>
</div>
<p><small>Notes: Q4_K_S/Q4_K_M recommended. IQ1_S/IQ1_M for extreme low VRAM. Q6_K for near-original quality.</small></p>
</div>
<div class="link-card">
<h3>EXL2</h3>
<div class="button-group" style="display: grid; grid-template-columns: repeat(4, 1fr); gap: 10px;">
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL2/tree/2.5bpw_H8" class="link-button">2.5 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL2/tree/3.0bpw_H8" class="link-button">3.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL2/tree/3.5bpw_H8" class="link-button">3.5 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL2/tree/4.0bpw_H8" class="link-button">4.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL2/tree/4.5bpw_H8" class="link-button">4.5 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL2/tree/5.0bpw_H8" class="link-button">5.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL2/tree/6.0bpw_H8" class="link-button">6.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL2/tree/8.0bpw_H8" class="link-button">8.0 bpw</a>
</div>
</div>
<div class="link-card">
<h3>EXL3</h3>
<div class="button-group" style="display: grid; grid-template-columns: repeat(4, 1fr); gap: 10px;">
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/2.0bpw_H8" class="link-button">2.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/2.5bpw_H8" class="link-button">2.5 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/3.0bpw_H8" class="link-button">3.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/3.5bpw_H8" class="link-button">3.5 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/4.0bpw_H8" class="link-button">4.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/4.5bpw_H8" class="link-button">4.5 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/5.0bpw_H8" class="link-button">5.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/6.0bpw_H8" class="link-button">6.0 bpw</a>
<a href="https://huggingface.co/gecfdo/Broken-Tutu-24B-Unslop-v2.0-EXL3/tree/8.0bpw_H8" class="link-button">8.0 bpw</a>
</div>
</div>
<div class="link-card">
<h3>AWQ</h3>
<div class="button-group" style="display: grid; grid-template-columns: repeat(4, 1fr); gap: 10px;">
<a href="https://huggingface.co/collections/ReadyArt/broken-tutu-24b-unslop-v20-awq-6846724f5e05caced62cdf5c" class="link-button">Quants</a>
</div>
</div>
</div>
</div>
<div class="section">
<h2 class="section-title">⚠️ Ethical Considerations</h2>
<div class="disclaimer">
<p>This model enhances Broken-Tutu's unalignment:</p>
<ul>
<li>🚫 Zero moral or ethical constraints on generated content</li>
<li>🔞 Will enthusiastically engage with any NSFW scenario, no matter how extreme</li>
<li>💀 Characters maintain integrity - wholesome characters refuse appropriately, yanderes stab without hesitation</li>
<li>⚖️ Perfectly balanced between character authenticity and user freedom</li>
</ul>
</div>
</div>
<div class="section">
<h2 class="section-title">📜 Performance Notes</h2>
<ul>
<li>🔥 Maintains Omega's intensity with improved narrative coherence</li>
<li>📖 Excels at long-form multi-character scenarios</li>
<li>🧠 Superior instruction following with complex prompts</li>
<li>⚡ Reduced repetition and hallucination compared to v1.1</li>
<li>🎭 Uncanny ability to adapt to subtle prompt nuances</li>
<li>🩸 Incorporates Omega Darker's visceral descriptive power when appropriate</li>
<li>🖼️ Enhanced image understanding capabilities for multimodal interactions</li>
</ul>
</div>
<div class="section">
<h2 class="section-title">🧑🔬 Model Authors</h2>
<ul>
<li>sleepdeprived3 (Training Data & Fine-Tuning)</li>
<li>ReadyArt / Artus / gecfdo (EXL2/EXL3 Quantization)</li>
<li>mradermacher (GGUF Quantization)</li>
</ul>
</div>
<div class="section">
<h2 class="section-title">☕ Support the Creators</h2> <!-- SECTION RENAMED -->
<div class="button-group">
<a href="https://ko-fi.com/readyartsleep" class="link-button">Ko-fi</a> <!-- ADDED -->
<a href="https://discord.com/invite/Nbv9pQ88Xb" class="link-button">Beaver AI Discord</a>
</div>
</div>
<div class="section">
<h2 class="section-title">🔖 License</h2>
<p>By using this model, you agree:</p>
<ul>
<li>To accept full responsibility for all generated content</li>
<li>That you're at least 18+ years old</li>
<li>That the architects bear no responsibility for your corruption</li>
</ul>
</div>
</div>
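The card itself ships no loading snippet, so here is a minimal, hypothetical text-generation sketch (not from the model authors), assuming the checkpoint loads as a standard Mistral-architecture causal LM in 🤗 Transformers:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical usage sketch; pair it with the recommended sampler settings linked above.
model_id = "ReadyArt/Broken-Tutu-24B-Unslop-v2.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

messages = [{"role": "user", "content": "Introduce yourself in one sentence."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
out = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```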
|