slug
stringlengths 15
15
| content
listlengths 1
129
| rawContent
stringlengths 1
2k
| author
dict | attachments
listlengths 0
49
| mentions
listlengths 0
49
| reactions
listlengths 0
12
| publishedAt
stringlengths 24
24
| updatedAt
stringlengths 24
24
| commentators
listlengths 0
52
| url
stringlengths 25
46
| totalUniqueImpressions
int64 1
42.1k
⌀ | numComments
int64 0
621
|
---|---|---|---|---|---|---|---|---|---|---|---|---|
811529118817010 | [
{
"type": "text",
"value": "Welcome to LLMLingua-2, a small-size yet powerful prompt compression method trained via data distillation from GPT-4 for token classification with a BERT-level encoder, excels in task-agnostic compression. It surpasses LLMLingua in handling out-of-domain data, offering 3x-6x faster performance. ",
"raw": "Welcome to LLMLingua-2, a small-size yet powerful prompt compression method trained via data distillation from GPT-4 for token classification with a BERT-level encoder, excels in task-agnostic compression. It surpasses LLMLingua in handling out-of-domain data, offering 3x-6x faster performance. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@qianhuiwu",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "qianhuiwu",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "website: ",
"raw": "website: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://llmlingua.com/llmlingua2.html",
"href": "https://llmlingua.com/llmlingua2.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "code: ",
"raw": "code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/microsoft/LLMLingua",
"href": "https://github.com/microsoft/LLMLingua",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "demo: ",
"raw": "demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/microsoft/llmlingua-2",
"href": null,
"resource": {
"type": "space",
"id": "microsoft/llmlingua-2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/microsoft/llmlingua-2",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Welcome to LLMLingua-2, a small-size yet powerful prompt compression method trained via data distillation from GPT-4 for token classification with a BERT-level encoder, excels in task-agnostic compression. It surpasses LLMLingua in handling out-of-domain data, offering 3x-6x faster performance. @qianhuiwu
website: https://llmlingua.com/llmlingua2.html
code: https://github.com/microsoft/LLMLingua
demo: https://huggingface.co/spaces/microsoft/llmlingua-2 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6278bd42541f3d2dfa77ea70/ejn49eapnB3UXQckAYdTd.jpeg",
"fullname": "Huiqiang Jiang",
"name": "iofu728",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 6,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63ef330b1e695b35aa484e11/bXwpGy0dl8JXeJwJ--ilr.jpeg",
"fullname": "Qianhui WU",
"name": "qianhuiwu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
}
] | [
{
"reaction": "🔥",
"users": [
"victor",
"osanseviero",
"alexandreteles",
"acamilogg88",
"taisazero",
"Qwoook",
"hieutrungdao"
],
"count": 7
},
{
"reaction": "❤️",
"users": [
"qianhuiwu"
],
"count": 1
}
] | 2024-03-21T14:51:50.000Z | 2024-06-14T04:21:25.938Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg",
"fullname": "Victor Mustar",
"name": "victor",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 2607,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655362804192-noauth.jpeg",
"fullname": "Dao Trung Hieu",
"name": "hieutrungdao",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/iofu728/811529118817010 | 1,426 | 2 |
868531703296564 | [
{
"type": "text",
"value": "I just uploaded a Dataset that adds AI descriptions to top trending Spaces! It is really useful to find awesome Spaces 🚀",
"raw": "I just uploaded a Dataset that adds AI descriptions to top trending Spaces! It is really useful to find awesome Spaces 🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/victor/hf-spaces-with-descriptions/viewer/default/train",
"href": null,
"resource": {
"type": "dataset",
"id": "victor/hf-spaces-with-descriptions",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/victor/hf-spaces-with-descriptions/viewer/default/train",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I used HF Serverless Endpoints (with Mixtral) to get the descriptions, it was seamless to do! btw check out the new Serverless Endpoints dashboard - it's awesome!",
"raw": "I used HF Serverless Endpoints (with Mixtral) to get the descriptions, it was seamless to do! btw check out the new Serverless Endpoints dashboard - it's awesome!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I just uploaded a Dataset that adds AI descriptions to top trending Spaces! It is really useful to find awesome Spaces 🚀
https://huggingface.co/datasets/victor/hf-spaces-with-descriptions/viewer/default/train
I used HF Serverless Endpoints (with Mixtral) to get the descriptions, it was seamless to do! btw check out the new Serverless Endpoints dashboard - it's awesome! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg",
"fullname": "Victor Mustar",
"name": "victor",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 2607,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/zDNI68L4cEWag3jEwzOCH.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/jiYnY_11MSpopZS381alo.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/Cd032H083VI7FL7zuHgfP.png"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"osanseviero",
"radames",
"samusenps",
"m-ric",
"nmnijilkhan",
"pierrci"
],
"count": 6
}
] | 2024-03-21T10:27:49.000Z | 2024-03-21T11:51:24.270Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg",
"fullname": "Victor Mustar",
"name": "victor",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 2607,
"isFollowing": false
}
] | /posts/victor/868531703296564 | 2,200 | 2 |
665155769111798 | [
{
"type": "text",
"value": "Just released a dataset with 1.5M image question/answers! ",
"raw": "Just released a dataset with 1.5M image question/answers! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/vikhyatk/lnqa",
"href": null,
"resource": {
"type": "dataset",
"id": "vikhyatk/lnqa",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/vikhyatk/lnqa",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Just released a dataset with 1.5M image question/answers! https://huggingface.co/datasets/vikhyatk/lnqa | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg",
"fullname": "Vik Korrapati",
"name": "vikhyatk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 375,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"Felladrin",
"btbytes",
"femboysLover",
"AIIAR",
"ajibawa-2023",
"samusenps",
"radames",
"clefourrier",
"kristaller486",
"VictorSanh",
"smangrul",
"zelus82",
"Sylvestre"
],
"count": 13
},
{
"reaction": "👀",
"users": [
"osanseviero"
],
"count": 1
},
{
"reaction": "😎",
"users": [
"samusenps"
],
"count": 1
}
] | 2024-03-21T08:15:50.000Z | 2024-03-21T08:15:50.886Z | [] | /posts/vikhyatk/665155769111798 | 2,228 | 0 |
883512866010622 | [
{
"type": "text",
"value": "Testing new pix2pix-Turbo in real-time, very interesting GAN architecture that leverages SD-Turbo model. Here I'm using edge2image LoRA single-step inference 🤯",
"raw": "Testing new pix2pix-Turbo in real-time, very interesting GAN architecture that leverages SD-Turbo model. Here I'm using edge2image LoRA single-step inference 🤯",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It's very interesting how ControlNet Canny quality is comparable, but in a single step. Looking forward to when they release the code: ",
"raw": "It's very interesting how ControlNet Canny quality is comparable, but in a single step. Looking forward to when they release the code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/GaParmar/img2img-turbo/issues/1",
"href": "https://github.com/GaParmar/img2img-turbo/issues/1",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I've been keeping a list of fast diffusion model pipelines together with this real-time websocket app. Have a look if you want to test it locally, or check out the demo here on Spaces.",
"raw": "I've been keeping a list of fast diffusion model pipelines together with this real-time websocket app. Have a look if you want to test it locally, or check out the demo here on Spaces.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/radames/real-time-pix2pix-turbo",
"href": null,
"resource": {
"type": "space",
"id": "radames/real-time-pix2pix-turbo",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/radames/real-time-pix2pix-turbo",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Github app:",
"raw": "Github app:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/radames/Real-Time-Latent-Consistency-Model/",
"href": "https://github.com/radames/Real-Time-Latent-Consistency-Model/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can also check the authors img2img sketch model here",
"raw": "You can also check the authors img2img sketch model here",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/gparmar/img2img-turbo-sketch",
"href": null,
"resource": {
"type": "space",
"id": "gparmar/img2img-turbo-sketch",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/gparmar/img2img-turbo-sketch",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Refs: ",
"raw": "Refs: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.12036",
"href": null,
"resource": {
"type": "paper",
"id": "2403.12036",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.12036",
"code": null,
"user": null,
"label": "One-Step Image Translation with Text-to-Image Models (2403.12036)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "cc ",
"raw": "cc ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@gparmar",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "gparmar",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@junyanz",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "junyanz",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Testing new pix2pix-Turbo in real-time, very interesting GAN architecture that leverages SD-Turbo model. Here I'm using edge2image LoRA single-step inference 🤯
It's very interesting how ControlNet Canny quality is comparable, but in a single step. Looking forward to when they release the code: https://github.com/GaParmar/img2img-turbo/issues/1
I've been keeping a list of fast diffusion model pipelines together with this real-time websocket app. Have a look if you want to test it locally, or check out the demo here on Spaces.
https://huggingface.co/spaces/radames/real-time-pix2pix-turbo
Github app:
https://github.com/radames/Real-Time-Latent-Consistency-Model/
You can also check the authors img2img sketch model here
https://huggingface.co/spaces/gparmar/img2img-turbo-sketch
Refs:
https://huggingface.co/papers/2403.12036
cc @gparmar @junyanz | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648966381588-6064e095abd8d3692e3e2ed6.jpeg",
"fullname": "Radamés Ajna",
"name": "radames",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2401,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6064e095abd8d3692e3e2ed6/pN6Hox4UKq5InxATqGJZC.mp4"
}
] | [
{
"avatarUrl": "/avatars/70408f77db01b465cb1462e8a4dc00a8.svg",
"fullname": "Gaurav Parmar",
"name": "gparmar",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 6
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1671474335794-noauth.jpeg",
"fullname": "Jun-Yan Zhu",
"name": "junyanz",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
}
] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"xsa-dev",
"osanseviero",
"VeyVey",
"abidlabs",
"ChangWWWWq",
"JoeyKo1004",
"digitalcmartinez",
"gparmar",
"Csplk",
"sbarman25",
"gym890"
],
"count": 12
},
{
"reaction": "🔥",
"users": [
"samusenps",
"abidlabs",
"timestop1"
],
"count": 3
}
] | 2024-03-20T20:47:59.000Z | 2024-03-20T20:52:20.650Z | [] | /posts/radames/883512866010622 | 3,674 | 0 |
241005224895150 | [
{
"type": "text",
"value": "We've just published a detailed blog post on the creation of Cosmopedia dataset. We hope this will provide insights about generating synthetic data at scale for pre-training. ",
"raw": "We've just published a detailed blog post on the creation of Cosmopedia dataset. We hope this will provide insights about generating synthetic data at scale for pre-training. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/cosmopedia",
"href": "https://huggingface.co/blog/cosmopedia",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Here are some key takeaways: ",
"raw": "Here are some key takeaways: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🎯 Prompt curation is crucial: we want to cover many topics with few duplicates.",
"raw": "🎯 Prompt curation is crucial: we want to cover many topics with few duplicates.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 You can leverage various resources for diversity: using different seed data, generation formats, and target audiences.",
"raw": "📚 You can leverage various resources for diversity: using different seed data, generation formats, and target audiences.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "⚙️ The importance of a good technical stack: for scalable generations with tools like llm-swarm and fast model training and evaluation.",
"raw": "⚙️ The importance of a good technical stack: for scalable generations with tools like llm-swarm and fast model training and evaluation.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Have a good read!",
"raw": "Have a good read!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | We've just published a detailed blog post on the creation of Cosmopedia dataset. We hope this will provide insights about generating synthetic data at scale for pre-training.
https://huggingface.co/blog/cosmopedia
Here are some key takeaways:
🎯 Prompt curation is crucial: we want to cover many topics with few duplicates.
📚 You can leverage various resources for diversity: using different seed data, generation formats, and target audiences.
⚙️ The importance of a good technical stack: for scalable generations with tools like llm-swarm and fast model training and evaluation.
Have a good read! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61c141342aac764ce1654e43/81AwoT5IQ_Xdw0OVw7TKu.jpeg",
"fullname": "Loubna Ben Allal",
"name": "loubnabnl",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 2334,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/zqBe4DKZFLNYsuysvwsW4.png"
}
] | [] | [
{
"reaction": "🤗",
"users": [
"alielfilali01",
"anton-l",
"vwxyzjn",
"kavalciemre",
"samusenps",
"osanseviero",
"lvwerra",
"xu3kev",
"avinash02",
"thomwolf",
"fpaupier",
"manu",
"muhtasham",
"mjoshi",
"malmenea",
"AdinaY",
"victor",
"clefourrier",
"molonelaveh"
],
"count": 19
},
{
"reaction": "❤️",
"users": [
"alielfilali01",
"anton-l",
"vwxyzjn",
"samusenps",
"osanseviero",
"xu3kev",
"mmhamdy",
"thomwolf",
"muhtasham",
"aloobun",
"Salwa-Zeitoun",
"AdinaY",
"Dflare",
"molonelaveh",
"timpearce",
"svallory"
],
"count": 16
},
{
"reaction": "🔥",
"users": [
"alielfilali01",
"anton-l",
"vwxyzjn",
"severo",
"osanseviero",
"xu3kev",
"mmhamdy",
"thomwolf",
"muhtasham",
"AdinaY",
"ajibawa-2023",
"InferenceIllusionist",
"mlabonne",
"on1onmangoes",
"molonelaveh"
],
"count": 15
},
{
"reaction": "🤯",
"users": [
"svallory"
],
"count": 1
}
] | 2024-03-20T18:08:29.000Z | 2024-03-21T08:17:11.620Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1651847561574-5fcaabed246881afd5b00167.jpeg",
"fullname": "Muhtasham Oblokulov",
"name": "muhtasham",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 43,
"isFollowing": false
}
] | /posts/loubnabnl/241005224895150 | 6,395 | 1 |
912310876995424 | [
{
"type": "text",
"value": "The Era of 1-bit LLMs: Training Tips, Code and FAQ",
"raw": "The Era of 1-bit LLMs: Training Tips, Code and FAQ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/microsoft/unilm/blob/master/bitnet/The-Era-of-1-bit-LLMs__Training_Tips_Code_FAQ.pdf",
"href": "https://github.com/microsoft/unilm/blob/master/bitnet/The-Era-of-1-bit-LLMs__Training_Tips_Code_FAQ.pdf",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We present details and tips for training 1-bit LLMs. We also provide additional experiments and results that were not reported and responses to questions regarding the \"The-Era-of-1-bit-LLM\" paper. Finally, we include the official PyTorch implementation of BitNet (b1.58 and b1) for future research and development of 1-bit LLMs.",
"raw": "We present details and tips for training 1-bit LLMs. We also provide additional experiments and results that were not reported and responses to questions regarding the \"The-Era-of-1-bit-LLM\" paper. Finally, we include the official PyTorch implementation of BitNet (b1.58 and b1) for future research and development of 1-bit LLMs.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | The Era of 1-bit LLMs: Training Tips, Code and FAQ
https://github.com/microsoft/unilm/blob/master/bitnet/The-Era-of-1-bit-LLMs__Training_Tips_Code_FAQ.pdf
We present details and tips for training 1-bit LLMs. We also provide additional experiments and results that were not reported and responses to questions regarding the "The-Era-of-1-bit-LLM" paper. Finally, we include the official PyTorch implementation of BitNet (b1.58 and b1) for future research and development of 1-bit LLMs. | {
"avatarUrl": "/avatars/3965175b320d753d9a5ccb0c7d9298a4.svg",
"fullname": "Shuming Ma",
"name": "shumingma",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 51,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"HDiffusion",
"kaykyramos",
"osanseviero",
"ndavidson",
"samusenps",
"xu3kev",
"avinash02",
"NePe",
"victor",
"bunnycore",
"impactframes",
"AtAndDev",
"alielfilali01",
"Heihai"
],
"count": 14
},
{
"reaction": "🚀",
"users": [
"thegenerality",
"xu3kev",
"Smith42",
"AtAndDev",
"damerajee",
"InferenceIllusionist",
"Heihai"
],
"count": 7
},
{
"reaction": "🔥",
"users": [
"Inv",
"AtAndDev",
"hugingfaceg",
"Heihai"
],
"count": 4
}
] | 2024-03-20T17:47:43.000Z | 2024-03-22T04:22:44.153Z | [
{
"avatarUrl": "/avatars/a21cd50f45253229225e78514de8cbc3.svg",
"fullname": "B N",
"name": "bn999",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/shumingma/912310876995424 | 2,600 | 2 |
620513508305622 | [
{
"type": "text",
"value": "Announcing today the release of Common Corpus, the largest collection of fully open corpus on HuggingFace: nearly 500b words (600-700b tokens) in public domain.",
"raw": "Announcing today the release of Common Corpus, the largest collection of fully open corpus on HuggingFace: nearly 500b words (600-700b tokens) in public domain.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/collections/PleIAs/common-corpus-65d46e3ea3980fdcd66a5613",
"href": "https://huggingface.co/collections/PleIAs/common-corpus-65d46e3ea3980fdcd66a5613",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Common corpus is an international initiative coordinated by @pleias_fr with the support of the state start-up LANGU:IA (start-up d’Etat), supported by the French Ministry of Culture and DINUM and the involvement of the open science LLM community (Occiglot, Eleuther AI) and cultural heritage researchers.",
"raw": "Common corpus is an international initiative coordinated by @pleias_fr with the support of the state start-up LANGU:IA (start-up d’Etat), supported by the French Ministry of Culture and DINUM and the involvement of the open science LLM community (Occiglot, Eleuther AI) and cultural heritage researchers.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We aim to create the same kind of ecosystem there is now for fine tuning at the pretraining stage, by creating a strong commons without copyright issues or \"trade secret\" gatekeeping. Contrary to what many AI companies say, Common Corpus shows it is possible to train Large Language Models on fully open corpus. Due to the complexity of copyright check, we have only released a partial amount of the text we hold and will release way more in the months.",
"raw": "We aim to create the same kind of ecosystem there is now for fine tuning at the pretraining stage, by creating a strong commons without copyright issues or \"trade secret\" gatekeeping. Contrary to what many AI companies say, Common Corpus shows it is possible to train Large Language Models on fully open corpus. Due to the complexity of copyright check, we have only released a partial amount of the text we hold and will release way more in the months.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Common Corpus is multilingual. It also includes to date the largest open collections in French (110 billion words), German (30 billion words), Spanish (23 billion words), Dutch (18 billion words), Italian (10 billion words) as well as a very long tail of middle to low resource languages.",
"raw": "Common Corpus is multilingual. It also includes to date the largest open collections in French (110 billion words), German (30 billion words), Spanish (23 billion words), Dutch (18 billion words), Italian (10 billion words) as well as a very long tail of middle to low resource languages.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Our conviction is that open corpora make future models more inclusive, democratic, and respectful of cultural diversity, as well as more qualitative. Common Corpus holds many long texts in book form, editorialized, with reasoning rich content that have never been used to date for LLM pretraining.",
"raw": "Our conviction is that open corpora make future models more inclusive, democratic, and respectful of cultural diversity, as well as more qualitative. Common Corpus holds many long texts in book form, editorialized, with reasoning rich content that have never been used to date for LLM pretraining.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Common Corpus is an ongoing work and still need to get enhanced and completed. Sharing is caring: Common Corpus still needs more care to become \"a common\" like Wikipedia or Wikisource.",
"raw": "Common Corpus is an ongoing work and still need to get enhanced and completed. Sharing is caring: Common Corpus still needs more care to become \"a common\" like Wikipedia or Wikisource.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/Pclanglais/common-corpus",
"href": "https://huggingface.co/blog/Pclanglais/common-corpus",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Announcing today the release of Common Corpus, the largest collection of fully open corpus on HuggingFace: nearly 500b words (600-700b tokens) in public domain.
https://huggingface.co/collections/PleIAs/common-corpus-65d46e3ea3980fdcd66a5613
Common corpus is an international initiative coordinated by @pleias_fr with the support of the state start-up LANGU:IA (start-up d’Etat), supported by the French Ministry of Culture and DINUM and the involvement of the open science LLM community (Occiglot, Eleuther AI) and cultural heritage researchers.
We aim to create the same kind of ecosystem there is now for fine tuning at the pretraining stage, by creating a strong commons without copyright issues or "trade secret" gatekeeping. Contrary to what many AI companies say, Common Corpus shows it is possible to train Large Language Models on fully open corpus. Due to the complexity of copyright check, we have only released a partial amount of the text we hold and will release way more in the months.
Common Corpus is multilingual. It also includes to date the largest open collections in French (110 billion words), German (30 billion words), Spanish (23 billion words), Dutch (18 billion words), Italian (10 billion words) as well as a very long tail of middle to low resource languages.
Our conviction is that open corpora make future models more inclusive, democratic, and respectful of cultural diversity, as well as more qualitative. Common Corpus holds many long texts in book form, editorialized, with reasoning rich content that have never been used to date for LLM pretraining.
Common Corpus is an ongoing work and still need to get enhanced and completed. Sharing is caring: Common Corpus still needs more care to become "a common" like Wikipedia or Wikisource.
https://huggingface.co/blog/Pclanglais/common-corpus | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64ce091a9e9ca8123d7a42b0/OEPggp82RwigxNLL35LgT.jpeg",
"fullname": "Pierre-Carl Langlais",
"name": "Pclanglais",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"julien-c",
"storytracer",
"victor",
"osanseviero",
"giux78",
"Felladrin",
"carbonbasedLLM",
"Lewdiculous",
"SerialKicked",
"codito",
"pedevineau",
"J-Hansen",
"metasj"
],
"count": 13
},
{
"reaction": "👍",
"users": [
"tuscland"
],
"count": 1
},
{
"reaction": "🚀",
"users": [
"Lewdiculous"
],
"count": 1
}
] | 2024-03-20T17:31:47.000Z | 2024-03-20T17:31:47.746Z | [] | /posts/Pclanglais/620513508305622 | 2,473 | 0 |
486534538169374 | [
{
"type": "text",
"value": "mPLUG-DocOwl 1.5",
"raw": "mPLUG-DocOwl 1.5",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Unified Structure Learning for OCR-free Document Understanding",
"raw": "Unified Structure Learning for OCR-free Document Understanding",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.12895",
"href": null,
"resource": {
"type": "paper",
"id": "2403.12895",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.12895",
"code": null,
"user": null,
"label": "mPLUG-DocOwl 1.5: Unified Structure Learning for OCR-free Document\n Understanding (2403.12895)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Structure information is critical for understanding the semantics of text-rich images, such as documents, tables, and charts. Existing Multimodal Large Language Models (MLLMs) for Visual Document Understanding are equipped with text recognition ability but lack general structure understanding abilities for text-rich document images. In this work, we emphasize the importance of structure information in Visual Document Understanding and propose the Unified Structure Learning to boost the performance of MLLMs. Our Unified Structure Learning comprises structure-aware parsing tasks and multi-grained text localization tasks across 5 domains: document, webpage, table, chart, and natural image. To better encode structure information, we design a simple and effective vision-to-text module H-Reducer, which can not only maintain the layout information but also reduce the length of visual features by merging horizontal adjacent patches through convolution, enabling the LLM to understand high-resolution images more efficiently. Furthermore, by constructing structure-aware text sequences and multi-grained pairs of texts and bounding boxes for publicly available text-rich images, we build a comprehensive training set DocStruct4M to support structure learning. Finally, we construct a small but high-quality reasoning tuning dataset DocReason25K to trigger the detailed explanation ability in the document domain. Our model DocOwl 1.5 achieves state-of-the-art performance on 10 visual document understanding benchmarks, improving the SOTA performance of MLLMs with a 7B LLM by more than 10 points in 5/10 benchmarks.",
"raw": "Structure information is critical for understanding the semantics of text-rich images, such as documents, tables, and charts. Existing Multimodal Large Language Models (MLLMs) for Visual Document Understanding are equipped with text recognition ability but lack general structure understanding abilities for text-rich document images. In this work, we emphasize the importance of structure information in Visual Document Understanding and propose the Unified Structure Learning to boost the performance of MLLMs. Our Unified Structure Learning comprises structure-aware parsing tasks and multi-grained text localization tasks across 5 domains: document, webpage, table, chart, and natural image. To better encode structure information, we design a simple and effective vision-to-text module H-Reducer, which can not only maintain the layout information but also reduce the length of visual features by merging horizontal adjacent patches through convolution, enabling the LLM to understand high-resolution images more efficiently. Furthermore, by constructing structure-aware text sequences and multi-grained pairs of texts and bounding boxes for publicly available text-rich images, we build a comprehensive training set DocStruct4M to support structure learning. Finally, we construct a small but high-quality reasoning tuning dataset DocReason25K to trigger the detailed explanation ability in the document domain. Our model DocOwl 1.5 achieves state-of-the-art performance on 10 visual document understanding benchmarks, improving the SOTA performance of MLLMs with a 7B LLM by more than 10 points in 5/10 benchmarks.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | mPLUG-DocOwl 1.5
Unified Structure Learning for OCR-free Document Understanding
https://huggingface.co/papers/2403.12895
Structure information is critical for understanding the semantics of text-rich images, such as documents, tables, and charts. Existing Multimodal Large Language Models (MLLMs) for Visual Document Understanding are equipped with text recognition ability but lack general structure understanding abilities for text-rich document images. In this work, we emphasize the importance of structure information in Visual Document Understanding and propose the Unified Structure Learning to boost the performance of MLLMs. Our Unified Structure Learning comprises structure-aware parsing tasks and multi-grained text localization tasks across 5 domains: document, webpage, table, chart, and natural image. To better encode structure information, we design a simple and effective vision-to-text module H-Reducer, which can not only maintain the layout information but also reduce the length of visual features by merging horizontal adjacent patches through convolution, enabling the LLM to understand high-resolution images more efficiently. Furthermore, by constructing structure-aware text sequences and multi-grained pairs of texts and bounding boxes for publicly available text-rich images, we build a comprehensive training set DocStruct4M to support structure learning. Finally, we construct a small but high-quality reasoning tuning dataset DocReason25K to trigger the detailed explanation ability in the document domain. Our model DocOwl 1.5 achieves state-of-the-art performance on 10 visual document understanding benchmarks, improving the SOTA performance of MLLMs with a 7B LLM by more than 10 points in 5/10 benchmarks.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/-PQhAEPfW1j61aK4gLCMa.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"radames",
"samusenps",
"victor",
"AdinaY",
"9voltfan2009"
],
"count": 5
}
] | 2024-03-20T16:29:17.000Z | 2024-03-20T16:29:31.639Z | [] | /posts/akhaliq/486534538169374 | 2,160 | 0 |
693667861895357 | [
{
"type": "text",
"value": "Tips for saving disk space with Gradio 💾",
"raw": "Tips for saving disk space with Gradio 💾",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Try these out with gradio 4.22.0 ! Code snippet attached.",
"raw": "Try these out with gradio 4.22.0 ! Code snippet attached.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "1. Set delete_cache. The delete_cache parameter will periodically delete files from gradio's cache that are older than a given age. Setting it will also delete all files created by that app when the app shuts down. It is a tuple of two ints, (frequency, age) expressed in seconds. So delete_cache=(3600, 3600), will delete files older than an hour every hour.",
"raw": "1. Set delete_cache. The delete_cache parameter will periodically delete files from gradio's cache that are older than a given age. Setting it will also delete all files created by that app when the app shuts down. It is a tuple of two ints, (frequency, age) expressed in seconds. So delete_cache=(3600, 3600), will delete files older than an hour every hour.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "2. Use static files. Static files are not copied to the cache and are instead served directly to users of your app. This is useful for components displaying a lot of content that won't change, like a gallery with hundreds of images.",
"raw": "2. Use static files. Static files are not copied to the cache and are instead served directly to users of your app. This is useful for components displaying a lot of content that won't change, like a gallery with hundreds of images.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "3. Set format=\"jpeg\" for images and galleries. JPEGs take up less disk space than PNGs. This can also speed up the speed of your prediction function as they will be written to the cache faster.",
"raw": "3. Set format=\"jpeg\" for images and galleries. JPEGs take up less disk space than PNGs. This can also speed up the speed of your prediction function as they will be written to the cache faster.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Tips for saving disk space with Gradio 💾
Try these out with gradio 4.22.0 ! Code snippet attached.
1. Set delete_cache. The delete_cache parameter will periodically delete files from gradio's cache that are older than a given age. Setting it will also delete all files created by that app when the app shuts down. It is a tuple of two ints, (frequency, age) expressed in seconds. So delete_cache=(3600, 3600), will delete files older than an hour every hour.
2. Use static files. Static files are not copied to the cache and are instead served directly to users of your app. This is useful for components displaying a lot of content that won't change, like a gallery with hundreds of images.
3. Set format="jpeg" for images and galleries. JPEGs take up less disk space than PNGs. This can also speed up the speed of your prediction function as they will be written to the cache faster.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1654278567459-626a9bfa03e2e2796f24ca11.jpeg",
"fullname": "Freddy Boulton",
"name": "freddyaboulton",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 164,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/626a9bfa03e2e2796f24ca11/RcMyvroKAyBnxpywWWbMU.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"clem",
"samusenps",
"abidlabs",
"radames",
"avinash02",
"MaziyarPanahi",
"victor",
"clefourrier",
"boapps"
],
"count": 9
}
] | 2024-03-20T15:09:36.000Z | 2024-03-21T15:59:41.852Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1654278567459-626a9bfa03e2e2796f24ca11.jpeg",
"fullname": "Freddy Boulton",
"name": "freddyaboulton",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 164,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1621947938344-noauth.png",
"fullname": "Abubakar Abid",
"name": "abidlabs",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 487,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648966381588-6064e095abd8d3692e3e2ed6.jpeg",
"fullname": "Radamés Ajna",
"name": "radames",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2401,
"isFollowing": false
}
] | /posts/freddyaboulton/693667861895357 | 1,670 | 3 |
900263848977211 | [
{
"type": "text",
"value": "🔥 Level up your model training w/ GaLore + Transformers for SOTA results on consumer-grade hardware!",
"raw": "🔥 Level up your model training w/ GaLore + Transformers for SOTA results on consumer-grade hardware!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "⬇️ 82.5% less optimizer state memory footprint without performance degradation by expressing the gradient weight matrix as low rank.",
"raw": "⬇️ 82.5% less optimizer state memory footprint without performance degradation by expressing the gradient weight matrix as low rank.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👩🏿💻 Install via ",
"raw": "👩🏿💻 Install via ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "inline_code",
"value": null,
"raw": "`pip install transformers>=4.39.0 galore-torch`",
"href": null,
"resource": null,
"url": null,
"code": "pip install transformers>=4.39.0 galore-torch",
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ". #ProudlyGpuPoor",
"raw": ". #ProudlyGpuPoor",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The integration of GaLore into the training of large language models (LLMs) marks a significant advancement in the field of deep learning, particularly in terms of memory efficiency and the democratization of AI research. By allowing for the training of billion-parameter models on consumer-grade hardware, reducing memory footprint in optimizer states, and leveraging advanced projection matrix techniques, GaLore opens new horizons for researchers and practitioners with limited access to high-end computational resources.",
"raw": "The integration of GaLore into the training of large language models (LLMs) marks a significant advancement in the field of deep learning, particularly in terms of memory efficiency and the democratization of AI research. By allowing for the training of billion-parameter models on consumer-grade hardware, reducing memory footprint in optimizer states, and leveraging advanced projection matrix techniques, GaLore opens new horizons for researchers and practitioners with limited access to high-end computational resources.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔬 Find out more about GaLore and investigate lots of juicy technical details: ",
"raw": "🔬 Find out more about GaLore and investigate lots of juicy technical details: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/galore",
"href": "https://huggingface.co/blog/galore",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤗 Huge thanks to everyone involved ❤️:",
"raw": "🤗 Huge thanks to everyone involved ❤️:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "• authors: ",
"raw": "• authors: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@jiaweizhao",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "jiaweizhao",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Kyriection",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Kyriection",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@beidic",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "beidic",
"label": null,
"lang": null
},
{
"type": "text",
"value": " Zhangyang Wang ",
"raw": " Zhangyang Wang ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@animakumar",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "animakumar",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@tydsh",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "tydsh",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "• community contributors: ",
"raw": "• community contributors: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@hiyouga",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "hiyouga",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@mdouglas",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "mdouglas",
"label": null,
"lang": null
},
{
"type": "text",
"value": " and others!",
"raw": " and others!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "• ",
"raw": "• ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@ybelkada",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "ybelkada",
"label": null,
"lang": null
},
{
"type": "text",
"value": " for taking such swift action in composing and coordinating necessary PRs to get this live at ⚡ speed!",
"raw": " for taking such swift action in composing and coordinating necessary PRs to get this live at ⚡ speed!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏗️📈 Super rewarding to see how ",
"raw": "🏗️📈 Super rewarding to see how ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@timdettmers",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "timdettmers",
"label": null,
"lang": null
},
{
"type": "text",
"value": " work with optimizers is being built upon to achieve even greater heights!",
"raw": " work with optimizers is being built upon to achieve even greater heights!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚧 Actually, there are ongoing works to integrate GaLore into bitsandbytes and optimize memory efficiency even further 💪. We'll keep you posted!",
"raw": "🚧 Actually, there are ongoing works to integrate GaLore into bitsandbytes and optimize memory efficiency even further 💪. We'll keep you posted!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🔥 Level up your model training w/ GaLore + Transformers for SOTA results on consumer-grade hardware!
⬇️ 82.5% less optimizer state memory footprint without performance degradation by expressing the gradient weight matrix as low rank.
👩🏿💻 Install via `pip install transformers>=4.39.0 galore-torch`. #ProudlyGpuPoor
The integration of GaLore into the training of large language models (LLMs) marks a significant advancement in the field of deep learning, particularly in terms of memory efficiency and the democratization of AI research. By allowing for the training of billion-parameter models on consumer-grade hardware, reducing memory footprint in optimizer states, and leveraging advanced projection matrix techniques, GaLore opens new horizons for researchers and practitioners with limited access to high-end computational resources.
🔬 Find out more about GaLore and investigate lots of juicy technical details: https://huggingface.co/blog/galore
🤗 Huge thanks to everyone involved ❤️:
• authors: @jiaweizhao @Kyriection @beidic Zhangyang Wang @animakumar @tydsh
• community contributors: @hiyouga @mdouglas and others!
• @ybelkada for taking such swift action in composing and coordinating necessary PRs to get this live at ⚡ speed!
🏗️📈 Super rewarding to see how @timdettmers work with optimizers is being built upon to achieve even greater heights!
🚧 Actually, there are ongoing works to integrate GaLore into bitsandbytes and optimize memory efficiency even further 💪. We'll keep you posted! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/650e8d27463e7e33e95f1963/TH-6foaqFpCso1Y7NBEF4.png",
"fullname": "Titus von Koeller",
"name": "Titus-von-Koeller",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 41,
"isFollowing": false
} | [] | [
{
"avatarUrl": "/avatars/cb9cc6d2733031582c83f56dc6cd1dd5.svg",
"fullname": "Anima Anandkumar",
"name": "animakumar",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
},
{
"avatarUrl": "/avatars/dd21932b0c167131ee7545a622c46c3c.svg",
"fullname": "Beidi Chen",
"name": "beidic",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/642fef28a043f0ac7defa8a9/RwOEkuj3fOnOA54tGR7Ea.png",
"fullname": "Yaowei Zheng",
"name": "hiyouga",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 918
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64dd8355573d067c9e858262/Bu2kFs6-lcYc93A_SE8WU.jpeg",
"fullname": "Jiawei Zhao",
"name": "jiaweizhao",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5
},
{
"avatarUrl": "/avatars/9967b729916d1128773102797fed1673.svg",
"fullname": "Zhenyu Zhang",
"name": "Kyriection",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6400f6cc2b67d27affcfdb93/WA6FEZy_YaZPGhIWj2zda.jpeg",
"fullname": "Matthew Douglas",
"name": "mdouglas",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 23
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1638462736111-noauth.png",
"fullname": "Tim Dettmers",
"name": "timdettmers",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 310
},
{
"avatarUrl": "/avatars/6dd2bf1f9c5679e5c8c85d62c9836aac.svg",
"fullname": "Yuandong Tian",
"name": "tydsh",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 7
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648631057413-noauth.png",
"fullname": "Younes Belkada",
"name": "ybelkada",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 417
}
] | [
{
"reaction": "🔥",
"users": [
"ybelkada",
"lucabaggi",
"t1u1",
"hiyouga",
"osanseviero",
"Priceva",
"samusenps",
"victor",
"Zyn123",
"jiaweizhao",
"Tom-Neverwinter",
"chankhavu"
],
"count": 12
},
{
"reaction": "🤝",
"users": [
"ybelkada",
"mdouglas",
"hiyouga",
"Tom-Neverwinter"
],
"count": 4
}
] | 2024-03-20T12:50:35.000Z | 2024-03-21T19:19:06.808Z | [
{
"avatarUrl": "/avatars/afbc48df2e8c47c35be48168113d83c0.svg",
"fullname": "s",
"name": "Tom-Neverwinter",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
}
] | /posts/Titus-von-Koeller/900263848977211 | 1,930 | 1 |
288502341253111 | [
{
"type": "text",
"value": "🚀🎭🌟 New Research Alert - ICLR 2024! 🌟 🎭🚀",
"raw": "🚀🎭🌟 New Research Alert - ICLR 2024! 🌟 🎭🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: InstructPix2NeRF: Instructed 3D Portrait Editing from a Single Image 🌟🚀",
"raw": "📄 Title: InstructPix2NeRF: Instructed 3D Portrait Editing from a Single Image 🌟🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: InstructPix2NeRF is a novel approach to instructed 3D portrait editing from a single image, using a conditional latent 3D diffusion process and a token position randomization strategy to enable multi-semantic editing while preserving the identity of the portrait.",
"raw": "📝 Description: InstructPix2NeRF is a novel approach to instructed 3D portrait editing from a single image, using a conditional latent 3D diffusion process and a token position randomization strategy to enable multi-semantic editing while preserving the identity of the portrait.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: Jianhui Li et al.",
"raw": "👥 Authors: Jianhui Li et al.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹",
"raw": "📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Paper: ",
"raw": "🔗 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2311.02826",
"href": null,
"resource": {
"type": "paper",
"id": "2311.02826",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2311.02826",
"code": null,
"user": null,
"label": "InstructPix2NeRF: Instructed 3D Portrait Editing from a Single Image (2311.02826)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌐 Github Page: ",
"raw": "🌐 Github Page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://mybabyyh.github.io/InstructPix2NeRF",
"href": "https://mybabyyh.github.io/InstructPix2NeRF",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📁 Repository: ",
"raw": "📁 Repository: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/mybabyyh/InstructPix2NeRF",
"href": "https://github.com/mybabyyh/InstructPix2NeRF",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"href": null,
"resource": {
"type": "space",
"id": "DmitryRyumin/NewEraAI-Papers",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " curated by ",
"raw": " curated by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DmitryRyumin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DmitryRyumin",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Added to the Avatars Collection: ",
"raw": "🚀 Added to the Avatars Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"href": null,
"resource": {
"type": "collection",
"id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"discussionNum": null
},
"url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #InstructPix2NeRF #AvatarCustomization #3DPortrait #DiffusionProcess #IdentityConsistency #ICLR2024 #DeepLearning #Innovation",
"raw": "🔍 Keywords: #InstructPix2NeRF #AvatarCustomization #3DPortrait #DiffusionProcess #IdentityConsistency #ICLR2024 #DeepLearning #Innovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀🎭🌟 New Research Alert - ICLR 2024! 🌟 🎭🚀
📄 Title: InstructPix2NeRF: Instructed 3D Portrait Editing from a Single Image 🌟🚀
📝 Description: InstructPix2NeRF is a novel approach to instructed 3D portrait editing from a single image, using a conditional latent 3D diffusion process and a token position randomization strategy to enable multi-semantic editing while preserving the identity of the portrait.
👥 Authors: Jianhui Li et al.
📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹
🔗 Paper: https://huggingface.co/papers/2311.02826
🌐 Github Page: https://mybabyyh.github.io/InstructPix2NeRF
📁 Repository: https://github.com/mybabyyh/InstructPix2NeRF
📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin
🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36
🔍 Keywords: #InstructPix2NeRF #AvatarCustomization #3DPortrait #DiffusionProcess #IdentityConsistency #ICLR2024 #DeepLearning #Innovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/J-tdwsvpR31GXXS3xu2Nv.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/uxeczTt5wC9pdLpItCCvX.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/hk1tu7YUz_sqDzVlIimrL.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/G4W8xKBsnpMpZWL8dGnpm.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/QYELE2htzLBAb-aTjQvSx.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/9gUSzdxYSD1rYxXZdluB4.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/G4vniGIL4ZL9LOrCwO2b7.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/D0h36ggt6kz2xq1TrtUpp.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/7DG8AGk_-PP2zN6DdsZl-.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377
}
] | [
{
"reaction": "🔥",
"users": [
"DmitryRyumin",
"victor",
"samusenps",
"ZennyKenny",
"Priceva",
"sedayilmazer"
],
"count": 6
}
] | 2024-03-20T09:19:07.000Z | 2024-03-20T09:19:07.288Z | [] | /posts/DmitryRyumin/288502341253111 | 1,854 | 0 |
329986473893490 | [
{
"type": "text",
"value": "Realize LLM powered idea on Hugging Face Space.",
"raw": "Realize LLM powered idea on Hugging Face Space.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I made Space for you to duplicate, then it comes with Gradio and LLM served by Hugging Face's efficient Text Generation Inference(TGI) framework packed into a single machine. ",
"raw": "I made Space for you to duplicate, then it comes with Gradio and LLM served by Hugging Face's efficient Text Generation Inference(TGI) framework packed into a single machine. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It provides a sample app code snippet with ",
"raw": "It provides a sample app code snippet with ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "inline_code",
"value": null,
"raw": "`gr.ChatInterface`",
"href": null,
"resource": null,
"url": null,
"code": "gr.ChatInterface",
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ". However, it is not limited to chat usage, but you can leverage the efficiency of TGI for any sort of apps built in Gradio. ",
"raw": ". However, it is not limited to chat usage, but you can leverage the efficiency of TGI for any sort of apps built in Gradio. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Have you ever enjoyed playing with Hugging Chat? Then, you will enjoy writing your own idea with this. Because both are built on top of TGI!",
"raw": "Have you ever enjoyed playing with Hugging Chat? Then, you will enjoy writing your own idea with this. Because both are built on top of TGI!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Focus on your app code, and go beyond chat!",
"raw": "Focus on your app code, and go beyond chat!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/chansung/gradio_together_tgi",
"href": null,
"resource": {
"type": "space",
"id": "chansung/gradio_together_tgi",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/chansung/gradio_together_tgi",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Realize LLM powered idea on Hugging Face Space.
I made Space for you to duplicate, then it comes with Gradio and LLM served by Hugging Face's efficient Text Generation Inference(TGI) framework packed into a single machine.
It provides a sample app code snippet with `gr.ChatInterface`. However, it is not limited to chat usage, but you can leverage the efficiency of TGI for any sort of apps built in Gradio.
Have you ever enjoyed playing with Hugging Chat? Then, you will enjoy writing your own idea with this. Because both are built on top of TGI!
Focus on your app code, and go beyond chat!
https://huggingface.co/spaces/chansung/gradio_together_tgi | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg",
"fullname": "chansung park",
"name": "chansung",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2695,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"chansung",
"samusenps",
"victor",
"Priceva",
"radames",
"Rangers"
],
"count": 6
}
] | 2024-03-20T06:21:06.000Z | 2024-03-21T05:20:10.347Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648966381588-6064e095abd8d3692e3e2ed6.jpeg",
"fullname": "Radamés Ajna",
"name": "radames",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2401,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg",
"fullname": "chansung park",
"name": "chansung",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2695,
"isFollowing": false
}
] | /posts/chansung/329986473893490 | 2,536 | 2 |
961953524095401 | [
{
"type": "text",
"value": "Are you interested in contributing to open source multilingual AI with Hugging Face and Argilla? ",
"raw": "Are you interested in contributing to open source multilingual AI with Hugging Face and Argilla? ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The MPEP initiative (",
"raw": "The MPEP initiative (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/data-is-better-together/tree/main/prompt_translation",
"href": "https://github.com/huggingface/data-is-better-together/tree/main/prompt_translation",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ") of the Data is Better Together project offers the opportunity to do just that by helping to create multilingual model checkpoints. ",
"raw": ") of the Data is Better Together project offers the opportunity to do just that by helping to create multilingual model checkpoints. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If you're interested in contributing to the Russian-language dataset, please get in touch as I am the Russian-language lead. If you're interested in contributing to another language, the MPEP link above has all the information you need to do so. 🤗 ",
"raw": "If you're interested in contributing to the Russian-language dataset, please get in touch as I am the Russian-language lead. If you're interested in contributing to another language, the MPEP link above has all the information you need to do so. 🤗 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Are you interested in contributing to open source multilingual AI with Hugging Face and Argilla?
The MPEP initiative (https://github.com/huggingface/data-is-better-together/tree/main/prompt_translation) of the Data is Better Together project offers the opportunity to do just that by helping to create multilingual model checkpoints.
If you're interested in contributing to the Russian-language dataset, please get in touch as I am the Russian-language lead. If you're interested in contributing to another language, the MPEP link above has all the information you need to do so. 🤗 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656e3808d4de03a07d116850/JZh4lrjFueJZVqugjoloP.jpeg",
"fullname": "Kenneth Hamilton",
"name": "ZennyKenny",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 33,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🤗",
"users": [
"davanstrien",
"samusenps",
"victor",
"taufiqdp",
"osanseviero",
"Priceva",
"sedayilmazer",
"dvilasuero",
"clefourrier",
"kristaller486",
"ZennyKenny",
"alielfilali01"
],
"count": 12
},
{
"reaction": "❤️",
"users": [
"davanstrien",
"samusenps",
"osanseviero",
"cstr",
"dvilasuero",
"clefourrier",
"ZennyKenny",
"alielfilali01",
"Tonic"
],
"count": 9
}
] | 2024-03-19T21:04:57.000Z | 2024-03-19T21:12:32.475Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 410,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656e3808d4de03a07d116850/JZh4lrjFueJZVqugjoloP.jpeg",
"fullname": "Kenneth Hamilton",
"name": "ZennyKenny",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 33,
"isFollowing": false
}
] | /posts/ZennyKenny/961953524095401 | 2,035 | 2 |
709432469332844 | [
{
"type": "text",
"value": "𝗨𝘀𝗶𝗻𝗴 𝗟𝗟𝗠-𝗮𝘀-𝗮-𝗷𝘂𝗱𝗴𝗲 🧑⚖️ 𝗳𝗼𝗿 𝗮𝗻 𝗮𝘂𝘁𝗼𝗺𝗮𝘁𝗲𝗱 𝗮𝗻𝗱 𝘃𝗲𝗿𝘀𝗮𝘁𝗶𝗹𝗲 𝗲𝘃𝗮𝗹𝘂𝗮𝘁𝗶𝗼𝗻",
"raw": "𝗨𝘀𝗶𝗻𝗴 𝗟𝗟𝗠-𝗮𝘀-𝗮-𝗷𝘂𝗱𝗴𝗲 🧑⚖️ 𝗳𝗼𝗿 𝗮𝗻 𝗮𝘂𝘁𝗼𝗺𝗮𝘁𝗲𝗱 𝗮𝗻𝗱 𝘃𝗲𝗿𝘀𝗮𝘁𝗶𝗹𝗲 𝗲𝘃𝗮𝗹𝘂𝗮𝘁𝗶𝗼𝗻",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Evaluating LLM outputs is often hard, since many tasks require open-ended answers for which no deterministic metrics work: for instance, when asking a model to summarize a text, there could be hundreds of correct ways to do it. The most versatile way to grade these outputs is then human evaluation, but it is very time-consuming, thus costly.",
"raw": "Evaluating LLM outputs is often hard, since many tasks require open-ended answers for which no deterministic metrics work: for instance, when asking a model to summarize a text, there could be hundreds of correct ways to do it. The most versatile way to grade these outputs is then human evaluation, but it is very time-consuming, thus costly.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤔 Then 𝘄𝗵𝘆 𝗻𝗼𝘁 𝗮𝘀𝗸 𝗮𝗻𝗼𝘁𝗵𝗲𝗿 𝗟𝗟𝗠 𝘁𝗼 𝗱𝗼 𝘁𝗵𝗲 𝗲𝘃𝗮𝗹𝘂𝗮𝘁𝗶𝗼𝗻, by providing it relevant rating criteria? 👉 This is the idea behind LLM-as-a-judge.",
"raw": "🤔 Then 𝘄𝗵𝘆 𝗻𝗼𝘁 𝗮𝘀𝗸 𝗮𝗻𝗼𝘁𝗵𝗲𝗿 𝗟𝗟𝗠 𝘁𝗼 𝗱𝗼 𝘁𝗵𝗲 𝗲𝘃𝗮𝗹𝘂𝗮𝘁𝗶𝗼𝗻, by providing it relevant rating criteria? 👉 This is the idea behind LLM-as-a-judge.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "⚙️ To implement a LLM judge correctly, you need a few tricks.",
"raw": "⚙️ To implement a LLM judge correctly, you need a few tricks.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✅ So 𝗜'𝘃𝗲 𝗷𝘂𝘀𝘁 𝗽𝘂𝗯𝗹𝗶𝘀𝗵𝗲𝗱 𝗮 𝗻𝗲𝘄 𝗻𝗼𝘁𝗲𝗯𝗼𝗼𝗸 𝘀𝗵𝗼𝘄𝗶𝗻𝗴 𝗵𝗼𝘄 𝘁𝗼 𝗶𝗺𝗽𝗹𝗲𝗺𝗲𝗻𝘁 𝗶𝘁 𝗽𝗿𝗼𝗽𝗲𝗿𝗹𝘆 𝗶𝗻 𝗼𝘂𝗿 𝗛𝘂𝗴𝗴𝗶𝗻𝗴 𝗙𝗮𝗰𝗲 𝗖𝗼𝗼𝗸𝗯𝗼𝗼𝗸! (you can run it instantly in Google Colab)",
"raw": "✅ So 𝗜'𝘃𝗲 𝗷𝘂𝘀𝘁 𝗽𝘂𝗯𝗹𝗶𝘀𝗵𝗲𝗱 𝗮 𝗻𝗲𝘄 𝗻𝗼𝘁𝗲𝗯𝗼𝗼𝗸 𝘀𝗵𝗼𝘄𝗶𝗻𝗴 𝗵𝗼𝘄 𝘁𝗼 𝗶𝗺𝗽𝗹𝗲𝗺𝗲𝗻𝘁 𝗶𝘁 𝗽𝗿𝗼𝗽𝗲𝗿𝗹𝘆 𝗶𝗻 𝗼𝘂𝗿 𝗛𝘂𝗴𝗴𝗶𝗻𝗴 𝗙𝗮𝗰𝗲 𝗖𝗼𝗼𝗸𝗯𝗼𝗼𝗸! (you can run it instantly in Google Colab)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "➡️ 𝗟𝗟𝗠-𝗮𝘀-𝗮-𝗷𝘂𝗱𝗴𝗲 𝗰𝗼𝗼𝗸𝗯𝗼𝗼𝗸: ",
"raw": "➡️ 𝗟𝗟𝗠-𝗮𝘀-𝗮-𝗷𝘂𝗱𝗴𝗲 𝗰𝗼𝗼𝗸𝗯𝗼𝗼𝗸: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/learn/cookbook/llm_judge",
"href": "https://huggingface.co/learn/cookbook/llm_judge",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The Cookbook is a great collection of notebooks demonstrating recipes (thus the \"cookbook\") for common LLM usages. I recommend you to go take a look!",
"raw": "The Cookbook is a great collection of notebooks demonstrating recipes (thus the \"cookbook\") for common LLM usages. I recommend you to go take a look!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "➡️ 𝗔𝗹𝗹 𝗰𝗼𝗼𝗸𝗯𝗼𝗼𝗸𝘀: ",
"raw": "➡️ 𝗔𝗹𝗹 𝗰𝗼𝗼𝗸𝗯𝗼𝗼𝗸𝘀: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/learn/cookbook/index",
"href": "https://huggingface.co/learn/cookbook/index",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Thank you ",
"raw": "Thank you ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@MariaK",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "MariaK",
"label": null,
"lang": null
},
{
"type": "text",
"value": " for your support!",
"raw": " for your support!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 𝗨𝘀𝗶𝗻𝗴 𝗟𝗟𝗠-𝗮𝘀-𝗮-𝗷𝘂𝗱𝗴𝗲 🧑⚖️ 𝗳𝗼𝗿 𝗮𝗻 𝗮𝘂𝘁𝗼𝗺𝗮𝘁𝗲𝗱 𝗮𝗻𝗱 𝘃𝗲𝗿𝘀𝗮𝘁𝗶𝗹𝗲 𝗲𝘃𝗮𝗹𝘂𝗮𝘁𝗶𝗼𝗻
Evaluating LLM outputs is often hard, since many tasks require open-ended answers for which no deterministic metrics work: for instance, when asking a model to summarize a text, there could be hundreds of correct ways to do it. The most versatile way to grade these outputs is then human evaluation, but it is very time-consuming, thus costly.
🤔 Then 𝘄𝗵𝘆 𝗻𝗼𝘁 𝗮𝘀𝗸 𝗮𝗻𝗼𝘁𝗵𝗲𝗿 𝗟𝗟𝗠 𝘁𝗼 𝗱𝗼 𝘁𝗵𝗲 𝗲𝘃𝗮𝗹𝘂𝗮𝘁𝗶𝗼𝗻, by providing it relevant rating criteria? 👉 This is the idea behind LLM-as-a-judge.
⚙️ To implement a LLM judge correctly, you need a few tricks.
✅ So 𝗜'𝘃𝗲 𝗷𝘂𝘀𝘁 𝗽𝘂𝗯𝗹𝗶𝘀𝗵𝗲𝗱 𝗮 𝗻𝗲𝘄 𝗻𝗼𝘁𝗲𝗯𝗼𝗼𝗸 𝘀𝗵𝗼𝘄𝗶𝗻𝗴 𝗵𝗼𝘄 𝘁𝗼 𝗶𝗺𝗽𝗹𝗲𝗺𝗲𝗻𝘁 𝗶𝘁 𝗽𝗿𝗼𝗽𝗲𝗿𝗹𝘆 𝗶𝗻 𝗼𝘂𝗿 𝗛𝘂𝗴𝗴𝗶𝗻𝗴 𝗙𝗮𝗰𝗲 𝗖𝗼𝗼𝗸𝗯𝗼𝗼𝗸! (you can run it instantly in Google Colab)
➡️ 𝗟𝗟𝗠-𝗮𝘀-𝗮-𝗷𝘂𝗱𝗴𝗲 𝗰𝗼𝗼𝗸𝗯𝗼𝗼𝗸: https://huggingface.co/learn/cookbook/llm_judge
The Cookbook is a great collection of notebooks demonstrating recipes (thus the "cookbook") for common LLM usages. I recommend you to go take a look!
➡️ 𝗔𝗹𝗹 𝗰𝗼𝗼𝗸𝗯𝗼𝗼𝗸𝘀: https://huggingface.co/learn/cookbook/index
Thank you @MariaK for your support! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 494,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1666623150508-noauth.png",
"fullname": "Maria Khalusova",
"name": "MariaK",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 54
}
] | [
{
"reaction": "🔥",
"users": [
"MariaK",
"lewtun",
"loubnabnl",
"andrewrreed",
"AtAndDev",
"ajibawa-2023",
"andysalerno",
"samusenps",
"Csplk",
"osanseviero",
"Priceva",
"avinash02",
"clefourrier"
],
"count": 13
},
{
"reaction": "❤️",
"users": [
"MariaK",
"lewtun",
"loubnabnl",
"andrewrreed",
"AtAndDev",
"samusenps",
"Zyn123",
"osanseviero"
],
"count": 8
},
{
"reaction": "🚀",
"users": [
"MariaK",
"lewtun",
"loubnabnl",
"andrewrreed",
"AtAndDev"
],
"count": 5
}
] | 2024-03-19T17:14:45.000Z | 2024-03-26T13:01:46.239Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630f3e4002ce39336c411048/FXJON7b-aRUiH0_V2uRsi.jpeg",
"fullname": "alkinun",
"name": "AtAndDev",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 19,
"isFollowing": false
},
{
"avatarUrl": "/avatars/245ec3b183919c079f8c5023b3f7ca9f.svg",
"fullname": "CultriX",
"name": "CultriX",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 86,
"isFollowing": false
}
] | /posts/m-ric/709432469332844 | 2,039 | 2 |
585071145791233 | [
{
"type": "text",
"value": "Diaries of Open Source. Part 6!",
"raw": "Diaries of Open Source. Part 6!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏎️xAI releases Grok-1, a 314B MoE",
"raw": "🏎️xAI releases Grok-1, a 314B MoE",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Blog: ",
"raw": "Blog: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://x.ai/blog/grok-os",
"href": "https://x.ai/blog/grok-os",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "GH repo: ",
"raw": "GH repo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/xai-org/grok-1",
"href": "https://github.com/xai-org/grok-1",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/xai-org/grok-1",
"href": null,
"resource": {
"type": "model",
"id": "xai-org/grok-1",
"discussionNum": null
},
"url": "https://hf.co/xai-org/grok-1",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🕺MusicLang, a model for controllable music generation",
"raw": "🕺MusicLang, a model for controllable music generation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo: ",
"raw": "Demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/spaces/musiclang/musiclang-predict",
"href": null,
"resource": {
"type": "space",
"id": "musiclang/musiclang-predict",
"discussionNum": null
},
"url": "https://hf.co/spaces/musiclang/musiclang-predict",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "GH repo: ",
"raw": "GH repo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/musiclang/musiclang_predict",
"href": "https://github.com/musiclang/musiclang_predict",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔬BioT5: a family of models for biology and chemical text tasks",
"raw": "🔬BioT5: a family of models for biology and chemical text tasks",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Base model: ",
"raw": "Base model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/QizhiPei/biot5-base",
"href": null,
"resource": {
"type": "model",
"id": "QizhiPei/biot5-base",
"discussionNum": null
},
"url": "https://hf.co/QizhiPei/biot5-base",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model for molecule captioning and design: ",
"raw": "Model for molecule captioning and design: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/QizhiPei/biot5-base-mol2text",
"href": null,
"resource": {
"type": "model",
"id": "QizhiPei/biot5-base-mol2text",
"discussionNum": null
},
"url": "https://hf.co/QizhiPei/biot5-base-mol2text",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/QizhiPei/biot5-base-text2mol",
"href": null,
"resource": {
"type": "model",
"id": "QizhiPei/biot5-base-text2mol",
"discussionNum": null
},
"url": "https://hf.co/QizhiPei/biot5-base-text2mol",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "GH Repo: ",
"raw": "GH Repo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/QizhiPei/BioT5",
"href": "https://github.com/QizhiPei/BioT5",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/papers/2310.07276",
"href": null,
"resource": {
"type": "paper",
"id": "2310.07276",
"discussionNum": null
},
"url": "https://hf.co/papers/2310.07276",
"code": null,
"user": null,
"label": "BioT5: Enriching Cross-modal Integration in Biology with Chemical\n Knowledge and Natural Language Associations (2310.07276)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤏Check out the AQLM and QMoE official weights from ISTA-DAS lab",
"raw": "🤏Check out the AQLM and QMoE official weights from ISTA-DAS lab",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Org: ",
"raw": "Org: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://hf.co/ISTA-DASLab",
"href": "https://hf.co/ISTA-DASLab",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Papers: ",
"raw": "Papers: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/papers/2401.06118",
"href": null,
"resource": {
"type": "paper",
"id": "2401.06118",
"discussionNum": null
},
"url": "https://hf.co/papers/2401.06118",
"code": null,
"user": null,
"label": "Extreme Compression of Large Language Models via Additive Quantization (2401.06118)",
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/papers/2310.16795",
"href": null,
"resource": {
"type": "paper",
"id": "2310.16795",
"discussionNum": null
},
"url": "https://hf.co/papers/2310.16795",
"code": null,
"user": null,
"label": "QMoE: Practical Sub-1-Bit Compression of Trillion-Parameter Models (2310.16795)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀Community releases",
"raw": "🚀Community releases",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Einstein-v4-7B, a Mistral fine-tune on high-quality data ",
"raw": "Einstein-v4-7B, a Mistral fine-tune on high-quality data ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/Weyaxi/Einstein-v4-7B",
"href": null,
"resource": {
"type": "model",
"id": "Weyaxi/Einstein-v4-7B",
"discussionNum": null
},
"url": "https://hf.co/Weyaxi/Einstein-v4-7B",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "IL-7B, a Misttral fine-tune merge for rheumatology ",
"raw": "IL-7B, a Misttral fine-tune merge for rheumatology ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/cmcmaster/il_7b",
"href": null,
"resource": {
"type": "model",
"id": "cmcmaster/il_7b",
"discussionNum": null
},
"url": "https://hf.co/cmcmaster/il_7b",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " Caselaw Access Project, a collaboration to digitalize 40 million US court decisions from 6.7 million cases from 360 years ",
"raw": " Caselaw Access Project, a collaboration to digitalize 40 million US court decisions from 6.7 million cases from 360 years ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://hf.co/datasets/TeraflopAI/Caselaw_Access_Project",
"href": "https://hf.co/datasets/TeraflopAI/Caselaw_Access_Project",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌍Data and models around the world",
"raw": "🌍Data and models around the world",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "HPLT Monolingual, a dataset of 75 languages with over 40TB of data ",
"raw": "HPLT Monolingual, a dataset of 75 languages with over 40TB of data ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/datasets/HPLT/hplt_monolingual_v1_2",
"href": null,
"resource": {
"type": "dataset",
"id": "HPLT/hplt_monolingual_v1_2",
"discussionNum": null
},
"url": "https://hf.co/datasets/HPLT/hplt_monolingual_v1_2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "OpenLLM Turkish Benchmarks & Leaderboard ",
"raw": "OpenLLM Turkish Benchmarks & Leaderboard ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/collections/malhajar/openllmturkishleadboard-datasets-65e5854490a87c0f2670ec18",
"href": null,
"resource": {
"type": "collection",
"id": "malhajar/openllmturkishleadboard-datasets-65e5854490a87c0f2670ec18",
"discussionNum": null
},
"url": "https://hf.co/collections/malhajar/openllmturkishleadboard-datasets-65e5854490a87c0f2670ec18",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/spaces/malhajar/OpenLLMTurkishLeaderboard",
"href": null,
"resource": {
"type": "space",
"id": "malhajar/OpenLLMTurkishLeaderboard",
"discussionNum": null
},
"url": "https://hf.co/spaces/malhajar/OpenLLMTurkishLeaderboard",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Occiglot, a collaborative effort for European LLMs with an initial release of 7B models for French, German, Spanish, and Italian ",
"raw": "Occiglot, a collaborative effort for European LLMs with an initial release of 7B models for French, German, Spanish, and Italian ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/collections/occiglot/occiglot-eu5-7b-v01-65dbed502a6348b052695e01",
"href": null,
"resource": {
"type": "collection",
"id": "occiglot/occiglot-eu5-7b-v01-65dbed502a6348b052695e01",
"discussionNum": null
},
"url": "https://hf.co/collections/occiglot/occiglot-eu5-7b-v01-65dbed502a6348b052695e01",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Guftagoo, a Hindi+Hinglish multi-turn conversational dataset ",
"raw": "Guftagoo, a Hindi+Hinglish multi-turn conversational dataset ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/datasets/Tensoic/gooftagoo",
"href": null,
"resource": {
"type": "dataset",
"id": "Tensoic/gooftagoo",
"discussionNum": null
},
"url": "https://hf.co/datasets/Tensoic/gooftagoo",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "AryaBhatta-Orca-Maths-Hindi dataset ",
"raw": "AryaBhatta-Orca-Maths-Hindi dataset ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/datasets/GenVRadmin/Aryabhatta-Orca-Maths-Hindi",
"href": null,
"resource": {
"type": "dataset",
"id": "GenVRadmin/Aryabhatta-Orca-Maths-Hindi",
"discussionNum": null
},
"url": "https://hf.co/datasets/GenVRadmin/Aryabhatta-Orca-Maths-Hindi",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Diaries of Open Source. Part 6!
🏎️xAI releases Grok-1, a 314B MoE
Blog: https://x.ai/blog/grok-os
GH repo: https://github.com/xai-org/grok-1
Model: https://hf.co/xai-org/grok-1
🕺MusicLang, a model for controllable music generation
Demo: https://hf.co/spaces/musiclang/musiclang-predict
GH repo: https://github.com/musiclang/musiclang_predict
🔬BioT5: a family of models for biology and chemical text tasks
Base model: https://hf.co/QizhiPei/biot5-base
Model for molecule captioning and design: https://hf.co/QizhiPei/biot5-base-mol2text and https://hf.co/QizhiPei/biot5-base-text2mol
GH Repo: https://github.com/QizhiPei/BioT5
Paper: https://hf.co/papers/2310.07276
🤏Check out the AQLM and QMoE official weights from ISTA-DAS lab
Org: https://hf.co/ISTA-DASLab
Papers: https://hf.co/papers/2401.06118 and https://hf.co/papers/2310.16795
🚀Community releases
Einstein-v4-7B, a Mistral fine-tune on high-quality data https://hf.co/Weyaxi/Einstein-v4-7B
IL-7B, a Misttral fine-tune merge for rheumatology https://hf.co/cmcmaster/il_7b
Caselaw Access Project, a collaboration to digitalize 40 million US court decisions from 6.7 million cases from 360 years https://hf.co/datasets/TeraflopAI/Caselaw_Access_Project
🌍Data and models around the world
HPLT Monolingual, a dataset of 75 languages with over 40TB of data https://hf.co/datasets/HPLT/hplt_monolingual_v1_2
OpenLLM Turkish Benchmarks & Leaderboard https://hf.co/collections/malhajar/openllmturkishleadboard-datasets-65e5854490a87c0f2670ec18 and https://hf.co/spaces/malhajar/OpenLLMTurkishLeaderboard
Occiglot, a collaborative effort for European LLMs with an initial release of 7B models for French, German, Spanish, and Italian https://hf.co/collections/occiglot/occiglot-eu5-7b-v01-65dbed502a6348b052695e01
Guftagoo, a Hindi+Hinglish multi-turn conversational dataset https://hf.co/datasets/Tensoic/gooftagoo
AryaBhatta-Orca-Maths-Hindi dataset https://hf.co/datasets/GenVRadmin/Aryabhatta-Orca-Maths-Hindi | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"ajibawa-2023",
"YaTharThShaRma999",
"normanschizogh",
"samusenps",
"Euclid-Jie",
"lunarflu",
"Priceva",
"Andron00e",
"not-lain",
"vapuck"
],
"count": 10
},
{
"reaction": "❤️",
"users": [
"lunarflu",
"not-lain"
],
"count": 2
},
{
"reaction": "🤗",
"users": [
"lunarflu",
"not-lain"
],
"count": 2
}
] | 2024-03-19T16:23:57.000Z | 2024-03-19T16:34:06.410Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
}
] | /posts/osanseviero/585071145791233 | 1,914 | 1 |
497093114781728 | [
{
"type": "text",
"value": "Fast High-Resolution Image Synthesis with Latent Adversarial Diffusion Distillation",
"raw": "Fast High-Resolution Image Synthesis with Latent Adversarial Diffusion Distillation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.12015",
"href": null,
"resource": {
"type": "paper",
"id": "2403.12015",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.12015",
"code": null,
"user": null,
"label": "Fast High-Resolution Image Synthesis with Latent Adversarial Diffusion\n Distillation (2403.12015)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Diffusion models are the main driver of progress in image and video synthesis, but suffer from slow inference speed. Distillation methods, like the recently introduced adversarial diffusion distillation (ADD) aim to shift the model from many-shot to single-step inference, albeit at the cost of expensive and difficult optimization due to its reliance on a fixed pretrained DINOv2 discriminator. We introduce Latent Adversarial Diffusion Distillation (LADD), a novel distillation approach overcoming the limitations of ADD. In contrast to pixel-based ADD, LADD utilizes generative features from pretrained latent diffusion models. This approach simplifies training and enhances performance, enabling high-resolution multi-aspect ratio image synthesis. We apply LADD to Stable Diffusion 3 (8B) to obtain SD3-Turbo, a fast model that matches the performance of state-of-the-art text-to-image generators using only four unguided sampling steps. Moreover, we systematically investigate its scaling behavior and demonstrate LADD's effectiveness in various applications such as image editing and inpainting.",
"raw": "Diffusion models are the main driver of progress in image and video synthesis, but suffer from slow inference speed. Distillation methods, like the recently introduced adversarial diffusion distillation (ADD) aim to shift the model from many-shot to single-step inference, albeit at the cost of expensive and difficult optimization due to its reliance on a fixed pretrained DINOv2 discriminator. We introduce Latent Adversarial Diffusion Distillation (LADD), a novel distillation approach overcoming the limitations of ADD. In contrast to pixel-based ADD, LADD utilizes generative features from pretrained latent diffusion models. This approach simplifies training and enhances performance, enabling high-resolution multi-aspect ratio image synthesis. We apply LADD to Stable Diffusion 3 (8B) to obtain SD3-Turbo, a fast model that matches the performance of state-of-the-art text-to-image generators using only four unguided sampling steps. Moreover, we systematically investigate its scaling behavior and demonstrate LADD's effectiveness in various applications such as image editing and inpainting.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Fast High-Resolution Image Synthesis with Latent Adversarial Diffusion Distillation
https://huggingface.co/papers/2403.12015
Diffusion models are the main driver of progress in image and video synthesis, but suffer from slow inference speed. Distillation methods, like the recently introduced adversarial diffusion distillation (ADD) aim to shift the model from many-shot to single-step inference, albeit at the cost of expensive and difficult optimization due to its reliance on a fixed pretrained DINOv2 discriminator. We introduce Latent Adversarial Diffusion Distillation (LADD), a novel distillation approach overcoming the limitations of ADD. In contrast to pixel-based ADD, LADD utilizes generative features from pretrained latent diffusion models. This approach simplifies training and enhances performance, enabling high-resolution multi-aspect ratio image synthesis. We apply LADD to Stable Diffusion 3 (8B) to obtain SD3-Turbo, a fast model that matches the performance of state-of-the-art text-to-image generators using only four unguided sampling steps. Moreover, we systematically investigate its scaling behavior and demonstrate LADD's effectiveness in various applications such as image editing and inpainting.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/_Z6ZDKQqw_qEWnjt3fD6z.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"clem",
"JayMokoena",
"samusenps",
"krzysztofpapciak"
],
"count": 4
},
{
"reaction": "🔥",
"users": [
"mathiasn1",
"taohu"
],
"count": 2
}
] | 2024-03-19T14:28:23.000Z | 2024-03-19T14:28:23.356Z | [] | /posts/akhaliq/497093114781728 | 2,220 | 0 |
680129047887393 | [
{
"type": "text",
"value": "Today, I'm excited to launch two new models on the TTS Arena: MeloTTS and StyleTTS 2. Both are open sourced, permissively licensed, and highly efficient.",
"raw": "Today, I'm excited to launch two new models on the TTS Arena: MeloTTS and StyleTTS 2. Both are open sourced, permissively licensed, and highly efficient.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Curious to see how they compare with other leading models? Vote on the TTS Arena ⬇️",
"raw": "Curious to see how they compare with other leading models? Vote on the TTS Arena ⬇️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/TTS-AGI/TTS-Arena",
"href": null,
"resource": {
"type": "space",
"id": "TTS-AGI/TTS-Arena",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/TTS-AGI/TTS-Arena",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "MeloTTS, released by MyShell AI, provides realistic and lifelike text to speech while remaining efficient and fast, even when running on CPU. It supports a variety of languages, including but not limited to English, French, Chinese, and Japanese.",
"raw": "MeloTTS, released by MyShell AI, provides realistic and lifelike text to speech while remaining efficient and fast, even when running on CPU. It supports a variety of languages, including but not limited to English, French, Chinese, and Japanese.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "StyleTTS 2 is another fully open sourced text to speech framework. It's permissively licensed, highly-efficient, and supports voice cloning and longform narration. It also provides natural and lifelike speech.",
"raw": "StyleTTS 2 is another fully open sourced text to speech framework. It's permissively licensed, highly-efficient, and supports voice cloning and longform narration. It also provides natural and lifelike speech.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Both are available now to try on the TTS Arena - vote to find which one is better! The leaderboard will be revealed once we collect enough votes.",
"raw": "Both are available now to try on the TTS Arena - vote to find which one is better! The leaderboard will be revealed once we collect enough votes.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Today, I'm excited to launch two new models on the TTS Arena: MeloTTS and StyleTTS 2. Both are open sourced, permissively licensed, and highly efficient.
Curious to see how they compare with other leading models? Vote on the TTS Arena ⬇️
https://huggingface.co/spaces/TTS-AGI/TTS-Arena
MeloTTS, released by MyShell AI, provides realistic and lifelike text to speech while remaining efficient and fast, even when running on CPU. It supports a variety of languages, including but not limited to English, French, Chinese, and Japanese.
StyleTTS 2 is another fully open sourced text to speech framework. It's permissively licensed, highly-efficient, and supports voice cloning and longform narration. It also provides natural and lifelike speech.
Both are available now to try on the TTS Arena - vote to find which one is better! The leaderboard will be revealed once we collect enough votes. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png",
"fullname": "mrfakename",
"name": "mrfakename",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 969,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"Kukedlc",
"samusenps",
"osanseviero",
"theamazinceo",
"DmitryRyumin",
"AARon99",
"taufiqdp",
"victor",
"clem",
"diwank",
"jbilcke-hf",
"Hyperionllama",
"zironycho",
"pierrci",
"mirellyssl"
],
"count": 15
}
] | 2024-03-19T00:45:37.000Z | 2024-11-24T04:01:27.613Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66d1fa61ad293ffc4b1d035b/DQ2w7UUN-dPnIdmpjpgYs.png",
"fullname": "Patrick levy-Rosenthal",
"name": "metasoulone",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png",
"fullname": "mrfakename",
"name": "mrfakename",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 969,
"isFollowing": false
}
] | /posts/mrfakename/680129047887393 | 4,118 | 12 |
536867538229257 | [
{
"type": "text",
"value": "so what was your favorite or most surprising announcement from GTC 2024?",
"raw": "so what was your favorite or most surprising announcement from GTC 2024?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.youtube.com/live/Y2F8yisiS6E?si=shoZxQMHo_TTptCg",
"href": "https://www.youtube.com/live/Y2F8yisiS6E?si=shoZxQMHo_TTptCg",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | so what was your favorite or most surprising announcement from GTC 2024?
https://www.youtube.com/live/Y2F8yisiS6E?si=shoZxQMHo_TTptCg | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg",
"fullname": "Thomas Wolf",
"name": "thomwolf",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 704,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"clem",
"mishig",
"julien-c",
"OmbelineM"
],
"count": 4
}
] | 2024-03-18T23:24:08.000Z | 2024-03-19T13:35:16.429Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6538119803519fddb4a17e10/ffJMkdx-rM7VvLTCM6ri_.jpeg",
"fullname": "samusenps",
"name": "samusenps",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 91,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 138,
"isFollowing": false
}
] | /posts/thomwolf/536867538229257 | 2,864 | 3 |
187303171643734 | [
{
"type": "text",
"value": "Uni-SMART",
"raw": "Uni-SMART",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Universal Science Multimodal Analysis and Research Transformer",
"raw": "Universal Science Multimodal Analysis and Research Transformer",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.10301",
"href": null,
"resource": {
"type": "paper",
"id": "2403.10301",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.10301",
"code": null,
"user": null,
"label": "Uni-SMART: Universal Science Multimodal Analysis and Research\n Transformer (2403.10301)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "In scientific research and its application, scientific literature analysis is crucial as it allows researchers to build on the work of others. However, the fast growth of scientific knowledge has led to a massive increase in scholarly articles, making in-depth literature analysis increasingly challenging and time-consuming. The emergence of Large Language Models (LLMs) has offered a new way to address this challenge. Known for their strong abilities in summarizing texts, LLMs are seen as a potential tool to improve the analysis of scientific literature. However, existing LLMs have their own limits. Scientific literature often includes a wide range of multimodal elements, such as molecular structure, tables, and charts, which are hard for text-focused LLMs to understand and analyze. This issue points to the urgent need for new solutions that can fully understand and analyze multimodal content in scientific literature. To answer this demand, we present Uni-SMART (Universal Science Multimodal Analysis and Research Transformer), an innovative model designed for in-depth understanding of multimodal scientific literature. Through rigorous quantitative evaluation across several domains, Uni-SMART demonstrates superior performance over leading text-focused LLMs. Furthermore, our exploration extends to practical applications, including patent infringement detection and nuanced analysis of charts. These applications not only highlight Uni-SMART's adaptability but also its potential to revolutionize how we interact with scientific literature.",
"raw": "In scientific research and its application, scientific literature analysis is crucial as it allows researchers to build on the work of others. However, the fast growth of scientific knowledge has led to a massive increase in scholarly articles, making in-depth literature analysis increasingly challenging and time-consuming. The emergence of Large Language Models (LLMs) has offered a new way to address this challenge. Known for their strong abilities in summarizing texts, LLMs are seen as a potential tool to improve the analysis of scientific literature. However, existing LLMs have their own limits. Scientific literature often includes a wide range of multimodal elements, such as molecular structure, tables, and charts, which are hard for text-focused LLMs to understand and analyze. This issue points to the urgent need for new solutions that can fully understand and analyze multimodal content in scientific literature. To answer this demand, we present Uni-SMART (Universal Science Multimodal Analysis and Research Transformer), an innovative model designed for in-depth understanding of multimodal scientific literature. Through rigorous quantitative evaluation across several domains, Uni-SMART demonstrates superior performance over leading text-focused LLMs. Furthermore, our exploration extends to practical applications, including patent infringement detection and nuanced analysis of charts. These applications not only highlight Uni-SMART's adaptability but also its potential to revolutionize how we interact with scientific literature.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Uni-SMART
Universal Science Multimodal Analysis and Research Transformer
https://huggingface.co/papers/2403.10301
In scientific research and its application, scientific literature analysis is crucial as it allows researchers to build on the work of others. However, the fast growth of scientific knowledge has led to a massive increase in scholarly articles, making in-depth literature analysis increasingly challenging and time-consuming. The emergence of Large Language Models (LLMs) has offered a new way to address this challenge. Known for their strong abilities in summarizing texts, LLMs are seen as a potential tool to improve the analysis of scientific literature. However, existing LLMs have their own limits. Scientific literature often includes a wide range of multimodal elements, such as molecular structure, tables, and charts, which are hard for text-focused LLMs to understand and analyze. This issue points to the urgent need for new solutions that can fully understand and analyze multimodal content in scientific literature. To answer this demand, we present Uni-SMART (Universal Science Multimodal Analysis and Research Transformer), an innovative model designed for in-depth understanding of multimodal scientific literature. Through rigorous quantitative evaluation across several domains, Uni-SMART demonstrates superior performance over leading text-focused LLMs. Furthermore, our exploration extends to practical applications, including patent infringement detection and nuanced analysis of charts. These applications not only highlight Uni-SMART's adaptability but also its potential to revolutionize how we interact with scientific literature.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/GzeIHBeQ5eva5pJamjMT2.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"samusenps",
"muhtasham",
"osanseviero",
"hypnopump",
"ajibawa-2023",
"clem",
"AtAndDev"
],
"count": 7
},
{
"reaction": "🚀",
"users": [
"samusenps",
"muhtasham",
"hypnopump",
"clem",
"AtAndDev"
],
"count": 5
}
] | 2024-03-18T16:59:38.000Z | 2024-03-18T16:59:38.188Z | [] | /posts/akhaliq/187303171643734 | 3,255 | 0 |
770487932075465 | [
{
"type": "text",
"value": "Our 🐑 PECoRe 🐑 method to detect & attribute context usage in LM generations finally has an official Gradio demo! 🚀",
"raw": "Our 🐑 PECoRe 🐑 method to detect & attribute context usage in LM generations finally has an official Gradio demo! 🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/gsarti/pecore",
"href": null,
"resource": {
"type": "space",
"id": "gsarti/pecore",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/gsarti/pecore",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Highlights:",
"raw": "Highlights:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Context attribution for several decoder-only and encoder-decoder models using convenient presets",
"raw": "🔍 Context attribution for several decoder-only and encoder-decoder models using convenient presets",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Uses only LM internals to faithfully reflect context usage, no additional detector involved",
"raw": "🔍 Uses only LM internals to faithfully reflect context usage, no additional detector involved",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Highly parametrizable, export Python & Shell code snippets to run on your machine using 🐛 Inseq CLI (",
"raw": "🔍 Highly parametrizable, export Python & Shell code snippets to run on your machine using 🐛 Inseq CLI (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/inseq-team/inseq",
"href": "https://github.com/inseq-team/inseq",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ")",
"raw": ")",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Want to use PECoRe for your LMs? Feedback and comments are welcome! 🤗",
"raw": "Want to use PECoRe for your LMs? Feedback and comments are welcome! 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Our 🐑 PECoRe 🐑 method to detect & attribute context usage in LM generations finally has an official Gradio demo! 🚀
https://huggingface.co/spaces/gsarti/pecore
Highlights:
🔍 Context attribution for several decoder-only and encoder-decoder models using convenient presets
🔍 Uses only LM internals to faithfully reflect context usage, no additional detector involved
🔍 Highly parametrizable, export Python & Shell code snippets to run on your machine using 🐛 Inseq CLI (https://github.com/inseq-team/inseq)
Want to use PECoRe for your LMs? Feedback and comments are welcome! 🤗
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1670231290373-5e7749883d77a72421292d07.jpeg",
"fullname": "Gabriele Sarti",
"name": "gsarti",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 205,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"muhtasham",
"clem",
"louisbrulenaudet",
"santiviquez"
],
"count": 5
}
] | 2024-03-18T14:45:58.000Z | 2024-03-21T09:07:44.600Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1670231290373-5e7749883d77a72421292d07.jpeg",
"fullname": "Gabriele Sarti",
"name": "gsarti",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 205,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1657144463525-629a173153a72d997d3f57d0.jpeg",
"fullname": "Santiago Viquez",
"name": "santiviquez",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
}
] | /posts/gsarti/770487932075465 | 2,212 | 3 |
399329903001758 | [
{
"type": "text",
"value": "🚀🕺🌟 New Research Alert - CVPR 2024! 🌟 💃🏻🚀",
"raw": "🚀🕺🌟 New Research Alert - CVPR 2024! 🌟 💃🏻🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: NECA: Neural Customizable Human Avatar 🌟🚀",
"raw": "📄 Title: NECA: Neural Customizable Human Avatar 🌟🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: The NECA paper presents a novel method for creating customizable human avatars from video, allowing detailed manipulation of pose, shadow, shape, lighting, and texture for realistic rendering and editing.",
"raw": "📝 Description: The NECA paper presents a novel method for creating customizable human avatars from video, allowing detailed manipulation of pose, shadow, shape, lighting, and texture for realistic rendering and editing.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: Junjin Xiao, Qing Zhang, Zhan Xu, and Wei-Shi Zheng",
"raw": "👥 Authors: Junjin Xiao, Qing Zhang, Zhan Xu, and Wei-Shi Zheng",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸",
"raw": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Paper: ",
"raw": "🔗 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.10335",
"href": null,
"resource": {
"type": "paper",
"id": "2403.10335",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.10335",
"code": null,
"user": null,
"label": "NECA: Neural Customizable Human Avatar (2403.10335)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📁 Repository: ",
"raw": "📁 Repository: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/iSEE-Laboratory/NECA",
"href": "https://github.com/iSEE-Laboratory/NECA",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"href": null,
"resource": {
"type": "space",
"id": "DmitryRyumin/NewEraAI-Papers",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " curated by ",
"raw": " curated by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DmitryRyumin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DmitryRyumin",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Added to the Avatars Collection: ",
"raw": "🚀 Added to the Avatars Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"href": null,
"resource": {
"type": "collection",
"id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"discussionNum": null
},
"url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #NECA #AvatarCustomization #RealisticRendering #HumanRepresentation #CVPR2024 #DeepLearning #Animation #Innovation",
"raw": "🔍 Keywords: #NECA #AvatarCustomization #RealisticRendering #HumanRepresentation #CVPR2024 #DeepLearning #Animation #Innovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀🕺🌟 New Research Alert - CVPR 2024! 🌟 💃🏻🚀
📄 Title: NECA: Neural Customizable Human Avatar 🌟🚀
📝 Description: The NECA paper presents a novel method for creating customizable human avatars from video, allowing detailed manipulation of pose, shadow, shape, lighting, and texture for realistic rendering and editing.
👥 Authors: Junjin Xiao, Qing Zhang, Zhan Xu, and Wei-Shi Zheng
📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸
🔗 Paper: https://huggingface.co/papers/2403.10335
📁 Repository: https://github.com/iSEE-Laboratory/NECA
📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin
🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36
🔍 Keywords: #NECA #AvatarCustomization #RealisticRendering #HumanRepresentation #CVPR2024 #DeepLearning #Animation #Innovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/vQPBLyXTQ4KSflLT8qw9_.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/N9MR8DEIIHfzndChRVXvT.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/qDg0JxXyBKSIolQIko31J.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/my2nrsK4N12kk7KrJShBC.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/UD95hXxppcFrz6eIqMlOB.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377
}
] | [
{
"reaction": "👍",
"users": [
"DmitryRyumin",
"osanseviero",
"victor",
"samusenps",
"clem",
"nvspavankalyanch",
"mexicanamerican"
],
"count": 7
},
{
"reaction": "🔥",
"users": [
"nvspavankalyanch"
],
"count": 1
}
] | 2024-03-18T10:42:54.000Z | 2024-03-18T10:42:54.114Z | [] | /posts/DmitryRyumin/399329903001758 | 1,923 | 0 |
792998889340102 | [
{
"type": "text",
"value": "🖴 The HPLT monolingual dataset has a new home!",
"raw": "🖴 The HPLT monolingual dataset has a new home!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "After being in touch with HPLT folks, I've transfered the data to their org. That only makes sense. You can find it below.",
"raw": "After being in touch with HPLT folks, I've transfered the data to their org. That only makes sense. You can find it below.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/HPLT/hplt_monolingual_v1_2",
"href": null,
"resource": {
"type": "dataset",
"id": "HPLT/hplt_monolingual_v1_2",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/HPLT/hplt_monolingual_v1_2",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🖴 The HPLT monolingual dataset has a new home!
After being in touch with HPLT folks, I've transfered the data to their org. That only makes sense. You can find it below.
https://huggingface.co/datasets/HPLT/hplt_monolingual_v1_2 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594192845975-5e1e17b6fcf41d740b6996a8.jpeg",
"fullname": "Bram Vanroy",
"name": "BramVanroy",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 173,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🚀",
"users": [
"osanseviero",
"clem"
],
"count": 2
}
] | 2024-03-18T09:43:02.000Z | 2024-09-28T22:09:06.471Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1584020801691-noauth.jpeg",
"fullname": "Stefan Schweter",
"name": "stefan-it",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1868,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594192845975-5e1e17b6fcf41d740b6996a8.jpeg",
"fullname": "Bram Vanroy",
"name": "BramVanroy",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 173,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6147363543eb04c443cd4e39/Ofw2_zBsPPpj1LovQep0L.jpeg",
"fullname": "Meliksah Turker",
"name": "meliksahturker",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
}
] | /posts/BramVanroy/792998889340102 | 1,723 | 3 |
248867169918497 | [
{
"type": "text",
"value": "How about engaging in a creative chat with your favorite video character? 💬",
"raw": "How about engaging in a creative chat with your favorite video character? 💬",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@chansung",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "chansung",
"label": null,
"lang": null
},
{
"type": "text",
"value": " and I worked on a weekend project combining the benefits of Gemini 1.0 and powerful chat models like Zephyr to demo this. ",
"raw": " and I worked on a weekend project combining the benefits of Gemini 1.0 and powerful chat models like Zephyr to demo this. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We use Gemini 1.0 to produce the personality traits of any character found in an input video. We then prepare a system prompt with the discovered traits to start chatting with an LLM (Zephyr in this case). ",
"raw": "We use Gemini 1.0 to produce the personality traits of any character found in an input video. We then prepare a system prompt with the discovered traits to start chatting with an LLM (Zephyr in this case). ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Managing a video captioning model is a little out of our expertise, hence Gemini FTW here 😶🌫️",
"raw": "Managing a video captioning model is a little out of our expertise, hence Gemini FTW here 😶🌫️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👨💻 Code: ",
"raw": "👨💻 Code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/deep-diver/Vid2Persona",
"href": "https://github.com/deep-diver/Vid2Persona",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤗 Demo: ",
"raw": "🤗 Demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/spaces/chansung/vid2persona",
"href": null,
"resource": {
"type": "space",
"id": "chansung/vid2persona",
"discussionNum": null
},
"url": "https://hf.co/spaces/chansung/vid2persona",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | How about engaging in a creative chat with your favorite video character? 💬
@chansung and I worked on a weekend project combining the benefits of Gemini 1.0 and powerful chat models like Zephyr to demo this.
We use Gemini 1.0 to produce the personality traits of any character found in an input video. We then prepare a system prompt with the discovered traits to start chatting with an LLM (Zephyr in this case).
Managing a video captioning model is a little out of our expertise, hence Gemini FTW here 😶🌫️
👨💻 Code: https://github.com/deep-diver/Vid2Persona
🤗 Demo: https://hf.co/spaces/chansung/vid2persona | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649681653581-5f7fbd813e94f16a85448745.jpeg",
"fullname": "Sayak Paul",
"name": "sayakpaul",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 459,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg",
"fullname": "chansung park",
"name": "chansung",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2695
}
] | [
{
"reaction": "❤️",
"users": [
"chansung",
"osanseviero",
"CharlieWells",
"samusenps",
"clem"
],
"count": 5
}
] | 2024-03-18T09:38:55.000Z | 2024-03-18T09:38:55.873Z | [] | /posts/sayakpaul/248867169918497 | 1,909 | 0 |
254341354995663 | [
{
"type": "text",
"value": "🌟 New Research Alert - CVPR 2024! 🌟",
"raw": "🌟 New Research Alert - CVPR 2024! 🌟",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: \"SVGDreamer: Text-Guided SVG Generation with Diffusion Model\"",
"raw": "📄 Title: \"SVGDreamer: Text-Guided SVG Generation with Diffusion Model\"",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 TL;DR: Given a text prompt, SVGDreamer can generate editable and versatile high-fidelity vector graphics.",
"raw": "📝 TL;DR: Given a text prompt, SVGDreamer can generate editable and versatile high-fidelity vector graphics.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: In this work, the author has introduced SVGDreamer, an innovative model for text-guided vector graphics synthesis. SVGDreamer incorporates two crucial technical designs: semantic-driven image vectorization (SIVE) and vectorized particle-based score distillation (VPSD), which empower our model to generate vector graphics with high editability, superior visual quality, and notable diversity. ",
"raw": "📝 Description: In this work, the author has introduced SVGDreamer, an innovative model for text-guided vector graphics synthesis. SVGDreamer incorporates two crucial technical designs: semantic-driven image vectorization (SIVE) and vectorized particle-based score distillation (VPSD), which empower our model to generate vector graphics with high editability, superior visual quality, and notable diversity. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: [Ximing Xing](",
"raw": "👥 Authors: [Ximing Xing](",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://ximinng.github.io/",
"href": "https://ximinng.github.io/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "), Haitao Zhou, Chuang Wang, [Jing zhang](",
"raw": "), Haitao Zhou, Chuang Wang, [Jing zhang](",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://hellojing89.github.io/",
"href": "https://hellojing89.github.io/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "), [Dong Xu](",
"raw": "), [Dong Xu](",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.cs.hku.hk/index.php/people/academic-staff/dongxu",
"href": "https://www.cs.hku.hk/index.php/people/academic-staff/dongxu",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "), and [Qian Yu](",
"raw": "), and [Qian Yu](",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://yuqian1023.github.io/",
"href": "https://yuqian1023.github.io/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ")",
"raw": ")",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸",
"raw": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #SVGDreamer #Text-to-SVG #SVG #Diffusion #CVPR2024",
"raw": "🔍 Keywords: #SVGDreamer #Text-to-SVG #SVG #Diffusion #CVPR2024",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Resources:",
"raw": "Resources:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 arXiv Paper: ",
"raw": "🔗 arXiv Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2312.16476",
"href": "https://arxiv.org/abs/2312.16476",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌐 Github Page: ",
"raw": "🌐 Github Page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://ximinng.github.io/SVGDreamer-project/",
"href": "https://ximinng.github.io/SVGDreamer-project/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📁 Code Repository: ",
"raw": "📁 Code Repository: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/ximinng/SVGDreamer",
"href": "https://github.com/ximinng/SVGDreamer",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📜 Blog: ",
"raw": "📜 Blog: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/xingxm/svgdreamer",
"href": "https://huggingface.co/blog/xingxm/svgdreamer",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🌟 New Research Alert - CVPR 2024! 🌟
📄 Title: "SVGDreamer: Text-Guided SVG Generation with Diffusion Model"
📝 TL;DR: Given a text prompt, SVGDreamer can generate editable and versatile high-fidelity vector graphics.
📝 Description: In this work, the author has introduced SVGDreamer, an innovative model for text-guided vector graphics synthesis. SVGDreamer incorporates two crucial technical designs: semantic-driven image vectorization (SIVE) and vectorized particle-based score distillation (VPSD), which empower our model to generate vector graphics with high editability, superior visual quality, and notable diversity.
👥 Authors: [Ximing Xing](https://ximinng.github.io/), Haitao Zhou, Chuang Wang, [Jing zhang](https://hellojing89.github.io/), [Dong Xu](https://www.cs.hku.hk/index.php/people/academic-staff/dongxu), and [Qian Yu](https://yuqian1023.github.io/)
📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸
🔍 Keywords: #SVGDreamer #Text-to-SVG #SVG #Diffusion #CVPR2024
Resources:
🔗 arXiv Paper: https://arxiv.org/abs/2312.16476
🌐 Github Page: https://ximinng.github.io/SVGDreamer-project/
📁 Code Repository: https://github.com/ximinng/SVGDreamer
📜 Blog: https://huggingface.co/blog/xingxm/svgdreamer
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63e25fce52b7578dba4974a1/3lVORs5gTVoTfPRaOnoSI.png",
"fullname": "Ximing Xing",
"name": "xingxm",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63e25fce52b7578dba4974a1/h2SghZM-7QTSllsveaYmC.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63e25fce52b7578dba4974a1/iDA57W-I1a90CID-96R6w.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"xingxm",
"DmitryRyumin",
"osanseviero",
"samusenps",
"clem"
],
"count": 5
},
{
"reaction": "❤️",
"users": [
"xingxm",
"andersjohansson",
"clem"
],
"count": 3
},
{
"reaction": "🚀",
"users": [
"xingxm"
],
"count": 1
},
{
"reaction": "👀",
"users": [
"xingxm"
],
"count": 1
}
] | 2024-03-18T09:12:37.000Z | 2024-03-18T09:17:08.089Z | [] | /posts/xingxm/254341354995663 | 1,452 | 0 |
764500946057404 | [
{
"type": "text",
"value": "xAI releases the weights for Grok-1. Apparently it's a 314B MoE with 25% of the weights active on a given token.",
"raw": "xAI releases the weights for Grok-1. Apparently it's a 314B MoE with 25% of the weights active on a given token.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Blog: ",
"raw": "Blog: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://x.ai/blog/grok-os",
"href": "https://x.ai/blog/grok-os",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Code: ",
"raw": "Code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/xai-org/grok",
"href": "https://github.com/xai-org/grok",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/xai-org/grok-1",
"href": null,
"resource": {
"type": "model",
"id": "xai-org/grok-1",
"discussionNum": null
},
"url": "https://huggingface.co/xai-org/grok-1",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Weights: magnet:?xt=urn:btih:5f96d43576e3d386c9ba65b883210a393b68210e&tr=https%3A%2F%2Facademictorrents.com%2Fannounce.php&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce",
"raw": "Weights: magnet:?xt=urn:btih:5f96d43576e3d386c9ba65b883210a393b68210e&tr=https%3A%2F%2Facademictorrents.com%2Fannounce.php&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | xAI releases the weights for Grok-1. Apparently it's a 314B MoE with 25% of the weights active on a given token.
Blog: https://x.ai/blog/grok-os
Code: https://github.com/xai-org/grok
Model: https://huggingface.co/xai-org/grok-1
Weights: magnet:?xt=urn:btih:5f96d43576e3d386c9ba65b883210a393b68210e&tr=https%3A%2F%2Facademictorrents.com%2Fannounce.php&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg",
"fullname": "Vlad Bogolin",
"name": "vladbogo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 109,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"MrNobodyzac",
"sarthakkapila",
"jackyes",
"AnaDP",
"osanseviero",
"clem",
"LeroyDyer",
"KaleDivergence"
],
"count": 9
},
{
"reaction": "🔥",
"users": [
"samusenps",
"GO4code",
"mathiasn1",
"jamone",
"LexFree",
"kramp",
"clem",
"KaleDivergence"
],
"count": 8
},
{
"reaction": "👀",
"users": [
"samusenps",
"malhajar",
"Carlainsworth",
"clem",
"KaleDivergence"
],
"count": 5
},
{
"reaction": "🚀",
"users": [
"samusenps",
"KaleDivergence"
],
"count": 2
}
] | 2024-03-17T20:00:00.000Z | 2024-03-18T03:42:28.441Z | [
{
"avatarUrl": "/avatars/3a56d393543f0390aa7b03a0713f96e6.svg",
"fullname": "Mojtaba Fayazi",
"name": "S4mpl3r",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/a33ff741af64e64a6714d913f83de9db.svg",
"fullname": "baibizhe",
"name": "baibizhe",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/vladbogo/764500946057404 | 1,605 | 2 |
102498036261336 | [
{
"type": "text",
"value": "there were only 5 Major Releases last week !",
"raw": "there were only 5 Major Releases last week !",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "😱 it's so over ",
"raw": "😱 it's so over ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | there were only 5 Major Releases last week !
😱 it's so over | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg",
"fullname": "Joseph [open/acc] Pollack",
"name": "Tonic",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 313,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🤯",
"users": [
"samusenps",
"osanseviero",
"ABX-AI",
"pcuenq",
"thomwolf",
"LucienL"
],
"count": 6
},
{
"reaction": "😎",
"users": [
"clem"
],
"count": 1
}
] | 2024-03-17T06:21:08.000Z | 2024-03-17T08:42:14.237Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6538119803519fddb4a17e10/ffJMkdx-rM7VvLTCM6ri_.jpeg",
"fullname": "samusenps",
"name": "samusenps",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 91,
"isFollowing": false
}
] | /posts/Tonic/102498036261336 | 1,616 | 1 |
666702393639713 | [
{
"type": "text",
"value": "What’s missing in today’s AI?",
"raw": "What’s missing in today’s AI?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Adaptive AI Assistant(s) (AAA) ",
"raw": "Adaptive AI Assistant(s) (AAA) ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "AAA - an ai assistant that gradually reflects an intelligently amplified version of you. ",
"raw": "AAA - an ai assistant that gradually reflects an intelligently amplified version of you. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "All I see in today's advance AI systems are tools that are heavily engineered to do all the work while we seat back and watch.",
"raw": "All I see in today's advance AI systems are tools that are heavily engineered to do all the work while we seat back and watch.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Shouldn't it be:",
"raw": "Shouldn't it be:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- tools that augment our information processing capabilities? (as once proposed by ",
"raw": "- tools that augment our information processing capabilities? (as once proposed by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@karpathy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "karpathy",
"label": null,
"lang": null
},
{
"type": "text",
"value": ")",
"raw": ")",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- tools that can adapt to each person's needs? ",
"raw": "- tools that can adapt to each person's needs? ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- tools that reflect a more intelligent version of ourselves?",
"raw": "- tools that reflect a more intelligent version of ourselves?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Because think about it, if we continue to build AI systems that we end up heavily relying on for even the most complex tasks, without actively involving us in the process, we risk losing out on the opportunity for personal growth and development.",
"raw": "Because think about it, if we continue to build AI systems that we end up heavily relying on for even the most complex tasks, without actively involving us in the process, we risk losing out on the opportunity for personal growth and development.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Just saying.",
"raw": "Just saying.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Triple A is a more compelling path:)",
"raw": "Triple A is a more compelling path:)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "And I'm writing a paper on this.",
"raw": "And I'm writing a paper on this.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | What’s missing in today’s AI?
Adaptive AI Assistant(s) (AAA)
AAA - an ai assistant that gradually reflects an intelligently amplified version of you.
All I see in today's advance AI systems are tools that are heavily engineered to do all the work while we seat back and watch.
Shouldn't it be:
- tools that augment our information processing capabilities? (as once proposed by @karpathy)
- tools that can adapt to each person's needs?
- tools that reflect a more intelligent version of ourselves?
Because think about it, if we continue to build AI systems that we end up heavily relying on for even the most complex tasks, without actively involving us in the process, we risk losing out on the opportunity for personal growth and development.
Just saying.
Triple A is a more compelling path:)
And I'm writing a paper on this.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/C2CSQ7rYoguBvBbKiFZ9U.mp4"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1660434061546-62f83661fe21cc4875221c0f.jpeg",
"fullname": "Andrej K",
"name": "karpathy",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 476
}
] | [
{
"reaction": "👀",
"users": [
"osanseviero",
"clem",
"dathinge",
"Tom-Neverwinter"
],
"count": 4
},
{
"reaction": "➕",
"users": [
"samusenps",
"smy503",
"dathinge"
],
"count": 3
},
{
"reaction": "❤️",
"users": [
"dathinge",
"Tom-Neverwinter"
],
"count": 2
}
] | 2024-03-17T01:14:42.000Z | 2024-03-22T00:14:30.277Z | [] | /posts/Jaward/666702393639713 | 936 | 0 |
496583626851543 | [
{
"type": "text",
"value": "🚀💃🏻🌟 New Research Alert - CVPR 2024! 🌟🕺 🚀",
"raw": "🚀💃🏻🌟 New Research Alert - CVPR 2024! 🌟🕺 🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: Animatable Gaussians: Learning Pose-dependent Gaussian Maps for High-fidelity Human Avatar Modeling 🌟🚀",
"raw": "📄 Title: Animatable Gaussians: Learning Pose-dependent Gaussian Maps for High-fidelity Human Avatar Modeling 🌟🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: Animatable Gaussians - a novel method for creating lifelike human avatars from RGB videos, utilizing 2D CNNs and 3D Gaussian splatting to capture pose-dependent garment details and dynamic appearances with high fidelity.",
"raw": "📝 Description: Animatable Gaussians - a novel method for creating lifelike human avatars from RGB videos, utilizing 2D CNNs and 3D Gaussian splatting to capture pose-dependent garment details and dynamic appearances with high fidelity.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: Zhe Li, Zerong Zheng, Lizhen Wang, and Yebin Liu",
"raw": "👥 Authors: Zhe Li, Zerong Zheng, Lizhen Wang, and Yebin Liu",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸",
"raw": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Paper: ",
"raw": "🔗 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2311.16096",
"href": null,
"resource": {
"type": "paper",
"id": "2311.16096",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2311.16096",
"code": null,
"user": null,
"label": "Animatable Gaussians: Learning Pose-dependent Gaussian Maps for\n High-fidelity Human Avatar Modeling (2311.16096)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌐 Github Page: ",
"raw": "🌐 Github Page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://animatable-gaussians.github.io",
"href": "https://animatable-gaussians.github.io",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📁 Repository: ",
"raw": "📁 Repository: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/lizhe00/AnimatableGaussians",
"href": "https://github.com/lizhe00/AnimatableGaussians",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📺 Video: ",
"raw": "📺 Video: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.youtube.com/watch?v=kOmZxD0HxZI",
"href": "https://www.youtube.com/watch?v=kOmZxD0HxZI",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"href": null,
"resource": {
"type": "space",
"id": "DmitryRyumin/NewEraAI-Papers",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " curated by ",
"raw": " curated by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DmitryRyumin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DmitryRyumin",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Added to the Avatars Collection: ",
"raw": "🚀 Added to the Avatars Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"href": null,
"resource": {
"type": "collection",
"id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"discussionNum": null
},
"url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #AnimatableGaussians #HumanAvatars #3DGaussianSplatting #CVPR2024 #DeepLearning #Animation #Innovation",
"raw": "🔍 Keywords: #AnimatableGaussians #HumanAvatars #3DGaussianSplatting #CVPR2024 #DeepLearning #Animation #Innovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀💃🏻🌟 New Research Alert - CVPR 2024! 🌟🕺 🚀
📄 Title: Animatable Gaussians: Learning Pose-dependent Gaussian Maps for High-fidelity Human Avatar Modeling 🌟🚀
📝 Description: Animatable Gaussians - a novel method for creating lifelike human avatars from RGB videos, utilizing 2D CNNs and 3D Gaussian splatting to capture pose-dependent garment details and dynamic appearances with high fidelity.
👥 Authors: Zhe Li, Zerong Zheng, Lizhen Wang, and Yebin Liu
📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸
🔗 Paper: https://huggingface.co/papers/2311.16096
🌐 Github Page: https://animatable-gaussians.github.io
📁 Repository: https://github.com/lizhe00/AnimatableGaussians
📺 Video: https://www.youtube.com/watch?v=kOmZxD0HxZI
📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin
🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36
🔍 Keywords: #AnimatableGaussians #HumanAvatars #3DGaussianSplatting #CVPR2024 #DeepLearning #Animation #Innovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/dWW1DwUDs-vM3bCcSYvYO.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/cN58KzSMpflI9y2cErMVt.png"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/ev3XN6iPMzE4oEk8VviN8.mp4"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/6NNAPnITUhMjUTRoIUWQI.mp4"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/-As_1PHVygmF0NtiTYWz4.mp4"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/7WqH_2agsOFllr_EVw1AH.mp4"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/uj4OeptON6TkaiyFxCQWM.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/SjZoaWgFK0ghlE37Fjzpx.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377
}
] | [
{
"reaction": "🔥",
"users": [
"DmitryRyumin",
"jawadur",
"vladbogo",
"ktibbs9413",
"samusenps",
"osanseviero",
"AlekseiPravdin",
"merve",
"thedigitized1",
"kingabzpro",
"pcuenq",
"clem",
"2dts"
],
"count": 13
},
{
"reaction": "🚀",
"users": [
"samusenps",
"merve",
"osanseviero",
"pcuenq"
],
"count": 4
}
] | 2024-03-15T20:50:13.000Z | 2024-03-15T20:50:13.967Z | [] | /posts/DmitryRyumin/496583626851543 | 899 | 0 |
191879295455659 | [
{
"type": "text",
"value": "MM1",
"raw": "MM1",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Methods, Analysis & Insights from Multimodal LLM Pre-training",
"raw": "Methods, Analysis & Insights from Multimodal LLM Pre-training",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.09611",
"href": null,
"resource": {
"type": "paper",
"id": "2403.09611",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.09611",
"code": null,
"user": null,
"label": "MM1: Methods, Analysis & Insights from Multimodal LLM Pre-training (2403.09611)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "In this work, we discuss building performant Multimodal Large Language Models (MLLMs). In particular, we study the importance of various architecture components and data choices. Through careful and comprehensive ablations of the image encoder, the vision language connector, and various pre-training data choices, we identified several crucial design lessons. For example, we demonstrate that for large-scale multimodal pre-training using a careful mix of image-caption, interleaved image-text, and text-only data is crucial for achieving state-of-the-art (SOTA) few-shot results across multiple benchmarks, compared to other published pre-training results. Further, we show that the image encoder together with image resolution and the image token count has substantial impact, while the vision-language connector design is of comparatively negligible importance. By scaling up the presented recipe, we build MM1, a family of multimodal models up to 30B parameters, consisting of both dense models and mixture-of-experts (MoE) variants, that are SOTA in pre-training metrics and achieve competitive performance after supervised fine-tuning on a range of established multimodal benchmarks. Thanks to large-scale pre-training, MM1 enjoys appealing properties such as enhanced in-context learning, and multi-image reasoning, enabling few-shot chain-of-thought prompting.",
"raw": "In this work, we discuss building performant Multimodal Large Language Models (MLLMs). In particular, we study the importance of various architecture components and data choices. Through careful and comprehensive ablations of the image encoder, the vision language connector, and various pre-training data choices, we identified several crucial design lessons. For example, we demonstrate that for large-scale multimodal pre-training using a careful mix of image-caption, interleaved image-text, and text-only data is crucial for achieving state-of-the-art (SOTA) few-shot results across multiple benchmarks, compared to other published pre-training results. Further, we show that the image encoder together with image resolution and the image token count has substantial impact, while the vision-language connector design is of comparatively negligible importance. By scaling up the presented recipe, we build MM1, a family of multimodal models up to 30B parameters, consisting of both dense models and mixture-of-experts (MoE) variants, that are SOTA in pre-training metrics and achieve competitive performance after supervised fine-tuning on a range of established multimodal benchmarks. Thanks to large-scale pre-training, MM1 enjoys appealing properties such as enhanced in-context learning, and multi-image reasoning, enabling few-shot chain-of-thought prompting.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | MM1
Methods, Analysis & Insights from Multimodal LLM Pre-training
https://huggingface.co/papers/2403.09611
In this work, we discuss building performant Multimodal Large Language Models (MLLMs). In particular, we study the importance of various architecture components and data choices. Through careful and comprehensive ablations of the image encoder, the vision language connector, and various pre-training data choices, we identified several crucial design lessons. For example, we demonstrate that for large-scale multimodal pre-training using a careful mix of image-caption, interleaved image-text, and text-only data is crucial for achieving state-of-the-art (SOTA) few-shot results across multiple benchmarks, compared to other published pre-training results. Further, we show that the image encoder together with image resolution and the image token count has substantial impact, while the vision-language connector design is of comparatively negligible importance. By scaling up the presented recipe, we build MM1, a family of multimodal models up to 30B parameters, consisting of both dense models and mixture-of-experts (MoE) variants, that are SOTA in pre-training metrics and achieve competitive performance after supervised fine-tuning on a range of established multimodal benchmarks. Thanks to large-scale pre-training, MM1 enjoys appealing properties such as enhanced in-context learning, and multi-image reasoning, enabling few-shot chain-of-thought prompting.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/PlvJG9EwrUetEhI7DRoU5.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"zhoudasheng",
"merve",
"abidlabs",
"samusenps",
"AlekseiPravdin",
"nezubn",
"osanseviero",
"pcuenq",
"sukuya",
"clem",
"nebulae09"
],
"count": 11
}
] | 2024-03-15T16:42:12.000Z | 2024-03-15T16:42:12.438Z | [] | /posts/akhaliq/191879295455659 | 1,094 | 0 |
199048850278454 | [
{
"type": "text",
"value": "Excited to share my latest preprint with ",
"raw": "Excited to share my latest preprint with ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@oliveiracaio",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "oliveiracaio",
"label": null,
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@fabee",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "fabee",
"label": null,
"lang": null
},
{
"type": "text",
"value": ": \"Platypose: Calibrated Zero-Shot Multi-Hypothesis 3D Human Motion Estimation\"! 💃🕺",
"raw": ": \"Platypose: Calibrated Zero-Shot Multi-Hypothesis 3D Human Motion Estimation\"! 💃🕺",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Platypose is our new approach to estimating 3D human motions from 2D observations. What makes it special? 🤩 Well, we're able to estimate multiple hypotheses for motion, which is pretty cool!",
"raw": "Platypose is our new approach to estimating 3D human motions from 2D observations. What makes it special? 🤩 Well, we're able to estimate multiple hypotheses for motion, which is pretty cool!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "But here's the best part: our model is not only accurate but also well-calibrated. We've made sure that the predicted uncertainty matches the models confidence, so you can have a better understanding of Platypose's predictions.",
"raw": "But here's the best part: our model is not only accurate but also well-calibrated. We've made sure that the predicted uncertainty matches the models confidence, so you can have a better understanding of Platypose's predictions.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Project Page: ",
"raw": "Project Page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://sinzlab.org/publications/2024-platypose.html",
"href": "https://sinzlab.org/publications/2024-platypose.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.06164",
"href": null,
"resource": {
"type": "paper",
"id": "2403.06164",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.06164",
"code": null,
"user": null,
"label": "Platypose: Calibrated Zero-Shot Multi-Hypothesis 3D Human Motion\n Estimation (2403.06164)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model Weights: ",
"raw": "Model Weights: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/sinzlab/platypose",
"href": null,
"resource": {
"type": "model",
"id": "sinzlab/platypose",
"discussionNum": null
},
"url": "https://huggingface.co/sinzlab/platypose",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The code is open-source, so please leave a star ⭐",
"raw": "The code is open-source, so please leave a star ⭐",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/sinzlab/platypose",
"href": "https://github.com/sinzlab/platypose",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Excited to share my latest preprint with @oliveiracaio and @fabee: "Platypose: Calibrated Zero-Shot Multi-Hypothesis 3D Human Motion Estimation"! 💃🕺
Platypose is our new approach to estimating 3D human motions from 2D observations. What makes it special? 🤩 Well, we're able to estimate multiple hypotheses for motion, which is pretty cool!
But here's the best part: our model is not only accurate but also well-calibrated. We've made sure that the predicted uncertainty matches the models confidence, so you can have a better understanding of Platypose's predictions.
Project Page: https://sinzlab.org/publications/2024-platypose.html
Paper: https://huggingface.co/papers/2403.06164
Model Weights: https://huggingface.co/sinzlab/platypose
The code is open-source, so please leave a star ⭐
https://github.com/sinzlab/platypose | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60e6a490863d96fe2af1656b/1LkQFLbZhmfQ1-nq4qqlg.jpeg",
"fullname": "Paweł Pierzchlewicz",
"name": "ppierzc",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 9,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60e6a490863d96fe2af1656b/x91WyY02wQ32jG2N3Nenh.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60e6a490863d96fe2af1656b/ls45dzLgmXH14h6hFrtMm.gif"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60e6a490863d96fe2af1656b/K326lGypna1kuLJASecXd.gif"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60e6a490863d96fe2af1656b/8HW6EDVv4fLeS-hW6B9k3.gif"
}
] | [
{
"avatarUrl": "/avatars/c4d4e45cd0748820640a2cb92b964f24.svg",
"fullname": "Fabian",
"name": "fabee",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
},
{
"avatarUrl": "/avatars/917adc1428740701881635fc768f8b5c.svg",
"fullname": "caio oliveira da silva",
"name": "oliveiracaio",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1
}
] | [
{
"reaction": "🚀",
"users": [
"dmnkbckr",
"TheKonstantinWilleke",
"oliveiracaio",
"Himadri-ML",
"samusenps",
"osanseviero",
"maxburg",
"louisbrulenaudet",
"ZennyKenny"
],
"count": 9
},
{
"reaction": "🔥",
"users": [
"oliveiracaio",
"TheKonstantinWilleke",
"Himadri-ML",
"AlekseiPravdin",
"maxburg",
"clem",
"Conradcon"
],
"count": 7
},
{
"reaction": "🤗",
"users": [
"ArneNix",
"TheKonstantinWilleke",
"oliveiracaio",
"Himadri-ML",
"osanseviero"
],
"count": 5
},
{
"reaction": "❤️",
"users": [
"Himadri-ML",
"samusenps",
"osanseviero"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"trtm"
],
"count": 1
}
] | 2024-03-15T16:00:11.000Z | 2024-03-15T16:22:33.766Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6598219fa84537abc170084b/ud4XD_WnwEcSE25ygxTce.png",
"fullname": "Konstantin Willeke",
"name": "TheKonstantinWilleke",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
}
] | /posts/ppierzc/199048850278454 | 818 | 1 |
186277884866701 | [
{
"type": "text",
"value": "KTO offers an easier way to preference train LLMs (only 👍👎 ratings are required). As part of #DataIsBetterTogether, I've written a tutorial on creating a preference dataset using Argilla and Spaces. ",
"raw": "KTO offers an easier way to preference train LLMs (only 👍👎 ratings are required). As part of #DataIsBetterTogether, I've written a tutorial on creating a preference dataset using Argilla and Spaces. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Using this approach, you can create a dataset that anyone with a Hugging Face account can contribute to 🤯",
"raw": "Using this approach, you can create a dataset that anyone with a Hugging Face account can contribute to 🤯",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "See an example of the kind of Space you can create following this tutorial here: ",
"raw": "See an example of the kind of Space you can create following this tutorial here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/davanstrien/haiku-preferences",
"href": null,
"resource": {
"type": "space",
"id": "davanstrien/haiku-preferences",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/davanstrien/haiku-preferences",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🆕 New tutorial covers:",
"raw": "🆕 New tutorial covers:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💬 Generating responses with open models",
"raw": "💬 Generating responses with open models",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Collecting human feedback (do you like this model response? Yes/No)",
"raw": "👥 Collecting human feedback (do you like this model response? Yes/No)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤖 Preparing a TRL-compatible dataset for training aligned models",
"raw": "🤖 Preparing a TRL-compatible dataset for training aligned models",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check it out here: ",
"raw": "Check it out here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/data-is-better-together/tree/main/kto-preference",
"href": "https://github.com/huggingface/data-is-better-together/tree/main/kto-preference",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | KTO offers an easier way to preference train LLMs (only 👍👎 ratings are required). As part of #DataIsBetterTogether, I've written a tutorial on creating a preference dataset using Argilla and Spaces.
Using this approach, you can create a dataset that anyone with a Hugging Face account can contribute to 🤯
See an example of the kind of Space you can create following this tutorial here: https://huggingface.co/spaces/davanstrien/haiku-preferences
🆕 New tutorial covers:
💬 Generating responses with open models
👥 Collecting human feedback (do you like this model response? Yes/No)
🤖 Preparing a TRL-compatible dataset for training aligned models
Check it out here: https://github.com/huggingface/data-is-better-together/tree/main/kto-preference | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 410,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60107b385ac3e86b3ea4fc34/e3nyblnzCiC-J16hMSlyv.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"merve",
"transZ",
"osanseviero",
"AlekseiPravdin",
"jaigouk",
"JJhooww",
"jayomb",
"mmhamdy",
"AtAndDev",
"medmac01",
"pcuenq",
"mathiasn1",
"michaelbenayoun",
"dball",
"ZennyKenny",
"Cossale"
],
"count": 16
},
{
"reaction": "🔥",
"users": [
"AtAndDev",
"dark-pen",
"medmac01",
"michaelbenayoun",
"clem"
],
"count": 5
}
] | 2024-03-15T15:57:51.000Z | 2024-03-25T14:35:58.045Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/615c231c3a60fa8486f80634/t-kcY2gsYVcwrZrsTc0Fz.jpeg",
"fullname": "David Faragó",
"name": "dball",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 410,
"isFollowing": false
}
] | /posts/davanstrien/186277884866701 | 974 | 2 |
723540773348725 | [
{
"type": "text",
"value": "With the new WebSight dataset, converting the screenshot of a web page to its corresponding HTML code is just one fine-tuning step away",
"raw": "With the new WebSight dataset, converting the screenshot of a web page to its corresponding HTML code is just one fine-tuning step away",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We release a new version of our synthetic dataset:",
"raw": "We release a new version of our synthetic dataset:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "-Real images within web pages 🖼️",
"raw": "-Real images within web pages 🖼️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "-Tailwind CSS 🎨",
"raw": "-Tailwind CSS 🎨",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "-2M examples 📈",
"raw": "-2M examples 📈",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Our initial release, v0.1, featured web designs in HTML + CSS, using simple colored rectangles as image placeholders.",
"raw": "Our initial release, v0.1, featured web designs in HTML + CSS, using simple colored rectangles as image placeholders.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It was a good start to help models grasp the basics of web page structure and coding associations.",
"raw": "It was a good start to help models grasp the basics of web page structure and coding associations.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Yet, it was missing the look of a real website.",
"raw": "Yet, it was missing the look of a real website.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Improving visual appeal, we've now embedded actual images in our web designs, ensuring they match the site's content for a more authentic look.",
"raw": "Improving visual appeal, we've now embedded actual images in our web designs, ensuring they match the site's content for a more authentic look.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Switching to Tailwind CSS offers a more compact representation of the code.",
"raw": "Switching to Tailwind CSS offers a more compact representation of the code.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We've also expanded our dataset to 2 million examples!",
"raw": "We've also expanded our dataset to 2 million examples!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "After fine-tuning our forthcoming foundation vision-language model on this dataset, we've observed some encouraging capabilities, such as converting sketches directly into functional HTML code.",
"raw": "After fine-tuning our forthcoming foundation vision-language model on this dataset, we've observed some encouraging capabilities, such as converting sketches directly into functional HTML code.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We're excited to hear your thoughts and suggestions for future versions. What would you like to see next? Feel free to open a discussion on the hub!",
"raw": "We're excited to hear your thoughts and suggestions for future versions. What would you like to see next? Feel free to open a discussion on the hub!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Dataset: ",
"raw": "Dataset: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/HuggingFaceM4/WebSight",
"href": null,
"resource": {
"type": "dataset",
"id": "HuggingFaceM4/WebSight",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/HuggingFaceM4/WebSight",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Technical report: ",
"raw": "Technical report: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.09029",
"href": null,
"resource": {
"type": "paper",
"id": "2403.09029",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.09029",
"code": null,
"user": null,
"label": "Unlocking the conversion of Web Screenshots into HTML Code with the\n WebSight Dataset (2403.09029)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Blog post: ",
"raw": "Blog post: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/websight",
"href": "https://huggingface.co/blog/websight",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Google Colab: ",
"raw": "Google Colab: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://colab.research.google.com/drive/1LdamGKR2oacrDk-kYwz_Wfc1-RBUdzcO?usp=sharing",
"href": "https://colab.research.google.com/drive/1LdamGKR2oacrDk-kYwz_Wfc1-RBUdzcO?usp=sharing",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Work done with ",
"raw": "Work done with ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@VictorSanh",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "VictorSanh",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Leyo",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Leyo",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | With the new WebSight dataset, converting the screenshot of a web page to its corresponding HTML code is just one fine-tuning step away
We release a new version of our synthetic dataset:
-Real images within web pages 🖼️
-Tailwind CSS 🎨
-2M examples 📈
Our initial release, v0.1, featured web designs in HTML + CSS, using simple colored rectangles as image placeholders.
It was a good start to help models grasp the basics of web page structure and coding associations.
Yet, it was missing the look of a real website.
Improving visual appeal, we've now embedded actual images in our web designs, ensuring they match the site's content for a more authentic look.
Switching to Tailwind CSS offers a more compact representation of the code.
We've also expanded our dataset to 2 million examples!
After fine-tuning our forthcoming foundation vision-language model on this dataset, we've observed some encouraging capabilities, such as converting sketches directly into functional HTML code.
We're excited to hear your thoughts and suggestions for future versions. What would you like to see next? Feel free to open a discussion on the hub!
Dataset: https://huggingface.co/datasets/HuggingFaceM4/WebSight
Technical report: https://huggingface.co/papers/2403.09029
Blog post: https://huggingface.co/blog/websight
Google Colab: https://colab.research.google.com/drive/1LdamGKR2oacrDk-kYwz_Wfc1-RBUdzcO?usp=sharing
Work done with @VictorSanh @Leyo | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1635201569275-noauth.jpeg",
"fullname": "Hugo Laurençon",
"name": "HugoLaurencon",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1652185658647-6244866a456803e9500d0f6a.jpeg",
"fullname": "Leo Tronchon",
"name": "Leyo",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 68
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1619623771844-5ecea265968f6028e0559fa5.jpeg",
"fullname": "Victor Sanh",
"name": "VictorSanh",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 206
}
] | [
{
"reaction": "🚀",
"users": [
"VictorSanh",
"jeremy-london",
"aidystark",
"ajibawa-2023",
"seyf1elislam",
"Tonic",
"osanseviero",
"clem",
"Joseph717171"
],
"count": 9
},
{
"reaction": "🔥",
"users": [
"facenuma",
"Sylvestre",
"Tonic",
"osanseviero",
"clem",
"Joseph717171"
],
"count": 6
},
{
"reaction": "🧠",
"users": [
"aidystark",
"Tonic",
"osanseviero",
"WesPro",
"Joseph717171"
],
"count": 5
},
{
"reaction": "➕",
"users": [
"VictorSanh",
"aidystark",
"Tonic",
"clem"
],
"count": 4
}
] | 2024-03-15T15:45:33.000Z | 2024-03-19T12:17:52.556Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1635298773955-noauth.jpeg",
"fullname": "Aidy Osu",
"name": "aidystark",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 6,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 138,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1635201569275-noauth.jpeg",
"fullname": "Hugo Laurençon",
"name": "HugoLaurencon",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
}
] | /posts/HugoLaurencon/723540773348725 | 779 | 4 |
881543730128630 | [
{
"type": "text",
"value": "🎉Today, the 5000th Sentence Transformer model was uploaded to Hugging Face! Embedding models are extremely versatile, so it's no wonder that they're still being trained.",
"raw": "🎉Today, the 5000th Sentence Transformer model was uploaded to Hugging Face! Embedding models are extremely versatile, so it's no wonder that they're still being trained.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Here's a few resources to get you started with them:",
"raw": "Here's a few resources to get you started with them:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- All Sentence Transformer models: ",
"raw": "- All Sentence Transformer models: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/models?library=sentence-transformers&sort=trending",
"href": "https://huggingface.co/models?library=sentence-transformers&sort=trending",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Sentence Transformer documentation: ",
"raw": "- Sentence Transformer documentation: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://sbert.net/",
"href": "https://sbert.net/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Massive Text Embedding Benchmark (MTEB) Leaderboard: ",
"raw": "- Massive Text Embedding Benchmark (MTEB) Leaderboard: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/mteb/leaderboard",
"href": null,
"resource": {
"type": "space",
"id": "mteb/leaderboard",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/mteb/leaderboard",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The embedding space is extremely active right now, so if you're using an embedding model for your retrieval, semantic similarity, reranking, classification, clustering, etc., then be sure to keep an eye out on the trending Sentence Transformer models & new models on MTEB.",
"raw": "The embedding space is extremely active right now, so if you're using an embedding model for your retrieval, semantic similarity, reranking, classification, clustering, etc., then be sure to keep an eye out on the trending Sentence Transformer models & new models on MTEB.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Also, I'm curious if you've ever used Sentence Transformers via a third party library, like a RAG framework or vector database. I'm quite interested in more integrations to bring everyone free, efficient & powerful embedding models!",
"raw": "Also, I'm curious if you've ever used Sentence Transformers via a third party library, like a RAG framework or vector database. I'm quite interested in more integrations to bring everyone free, efficient & powerful embedding models!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🎉Today, the 5000th Sentence Transformer model was uploaded to Hugging Face! Embedding models are extremely versatile, so it's no wonder that they're still being trained.
Here's a few resources to get you started with them:
- All Sentence Transformer models: https://huggingface.co/models?library=sentence-transformers&sort=trending
- Sentence Transformer documentation: https://sbert.net/
- Massive Text Embedding Benchmark (MTEB) Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
The embedding space is extremely active right now, so if you're using an embedding model for your retrieval, semantic similarity, reranking, classification, clustering, etc., then be sure to keep an eye out on the trending Sentence Transformer models & new models on MTEB.
Also, I'm curious if you've ever used Sentence Transformers via a third party library, like a RAG framework or vector database. I'm quite interested in more integrations to bring everyone free, efficient & powerful embedding models!
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png",
"fullname": "Tom Aarsen",
"name": "tomaarsen",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1060,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"slava-medvedev",
"merve",
"AtAndDev",
"urchade",
"osanseviero",
"carlesoctav",
"ajibawa-2023",
"pcuenq",
"clem",
"Waldoh",
"hassamniaz7",
"alvarobartt",
"aamirshakir",
"nickprock"
],
"count": 14
},
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"pcuenq",
"clem",
"alvarobartt",
"aamirshakir",
"SeanLee97"
],
"count": 7
}
] | 2024-03-15T15:24:04.000Z | 2024-03-15T15:24:04.217Z | [] | /posts/tomaarsen/881543730128630 | 1,330 | 0 |
617808815576456 | [
{
"type": "text",
"value": "When Greg Brockman demo-ed GPT4 by hand-sketching a joke website on a piece of paper and asking the system to convert that into an HTML webpage, it blew my mind.",
"raw": "When Greg Brockman demo-ed GPT4 by hand-sketching a joke website on a piece of paper and asking the system to convert that into an HTML webpage, it blew my mind.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Can you build your own Screenshot-to-HTML system with much fewer resources?",
"raw": "Can you build your own Screenshot-to-HTML system with much fewer resources?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "With this new resource, most likely yes! Current vision-language models can learn this task with the right data (and the right tricks).",
"raw": "With this new resource, most likely yes! Current vision-language models can learn this task with the right data (and the right tricks).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We have iterated on WebSight-v0.1 and are releasing its v0.2.",
"raw": "We have iterated on WebSight-v0.1 and are releasing its v0.2.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "WebSight is an open dataset of synthetically generated webpages with their corresponding rendered screenshot.",
"raw": "WebSight is an open dataset of synthetically generated webpages with their corresponding rendered screenshot.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "A few noticeable improvements:",
"raw": "A few noticeable improvements:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 💨From traditional CSS to Tailwind CSS. Tailwind is CSS directly embedded in the HTML attribute class and is much more compact",
"raw": "- 💨From traditional CSS to Tailwind CSS. Tailwind is CSS directly embedded in the HTML attribute class and is much more compact",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 🚛2M pairs of synthetic HTML webpages with their associated rendered screenshot, along with the prompt generated by an LLM to create that webpage",
"raw": "- 🚛2M pairs of synthetic HTML webpages with their associated rendered screenshot, along with the prompt generated by an LLM to create that webpage",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 🖼️Much more visually appealing pages with the integration of real images",
"raw": "- 🖼️Much more visually appealing pages with the integration of real images",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👀Blog: ",
"raw": "👀Blog: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/websight",
"href": "https://huggingface.co/blog/websight",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💽Dataset: ",
"raw": "💽Dataset: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/HuggingFaceM4/WebSight",
"href": null,
"resource": {
"type": "dataset",
"id": "HuggingFaceM4/WebSight",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/HuggingFaceM4/WebSight",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📜Technical report: ",
"raw": "📜Technical report: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.09029",
"href": null,
"resource": {
"type": "paper",
"id": "2403.09029",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.09029",
"code": null,
"user": null,
"label": "Unlocking the conversion of Web Screenshots into HTML Code with the\n WebSight Dataset (2403.09029)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🎮Want to create your own synthetic data pipelines? A starting point: ",
"raw": "🎮Want to create your own synthetic data pipelines? A starting point: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://colab.research.google.com/drive/1LdamGKR2oacrDk-kYwz_Wfc1-RBUdzcO?usp=sharing",
"href": "https://colab.research.google.com/drive/1LdamGKR2oacrDk-kYwz_Wfc1-RBUdzcO?usp=sharing",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Built with ",
"raw": "Built with ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@HugoLaurencon",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "HugoLaurencon",
"label": null,
"lang": null
},
{
"type": "text",
"value": " & ",
"raw": " & ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Leyo",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Leyo",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | When Greg Brockman demo-ed GPT4 by hand-sketching a joke website on a piece of paper and asking the system to convert that into an HTML webpage, it blew my mind.
Can you build your own Screenshot-to-HTML system with much fewer resources?
With this new resource, most likely yes! Current vision-language models can learn this task with the right data (and the right tricks).
We have iterated on WebSight-v0.1 and are releasing its v0.2.
WebSight is an open dataset of synthetically generated webpages with their corresponding rendered screenshot.
A few noticeable improvements:
- 💨From traditional CSS to Tailwind CSS. Tailwind is CSS directly embedded in the HTML attribute class and is much more compact
- 🚛2M pairs of synthetic HTML webpages with their associated rendered screenshot, along with the prompt generated by an LLM to create that webpage
- 🖼️Much more visually appealing pages with the integration of real images
👀Blog: https://huggingface.co/blog/websight
💽Dataset: https://huggingface.co/datasets/HuggingFaceM4/WebSight
📜Technical report: https://huggingface.co/papers/2403.09029
🎮Want to create your own synthetic data pipelines? A starting point: https://colab.research.google.com/drive/1LdamGKR2oacrDk-kYwz_Wfc1-RBUdzcO?usp=sharing
Built with @HugoLaurencon & @Leyo | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1619623771844-5ecea265968f6028e0559fa5.jpeg",
"fullname": "Victor Sanh",
"name": "VictorSanh",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 206,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1635201569275-noauth.jpeg",
"fullname": "Hugo Laurençon",
"name": "HugoLaurencon",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1652185658647-6244866a456803e9500d0f6a.jpeg",
"fullname": "Leo Tronchon",
"name": "Leyo",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 68
}
] | [
{
"reaction": "🔥",
"users": [
"HugoLaurencon",
"osanseviero",
"Sylvestre",
"merve",
"clem",
"vinwizard"
],
"count": 6
},
{
"reaction": "🧠",
"users": [
"osanseviero",
"GabeGaidos",
"clem",
"Csplk"
],
"count": 4
},
{
"reaction": "❤️",
"users": [
"merve",
"samusenps",
"clem",
"Csplk"
],
"count": 4
}
] | 2024-03-15T14:12:28.000Z | 2024-03-15T14:12:28.580Z | [] | /posts/VictorSanh/617808815576456 | 767 | 0 |
853315170369291 | [
{
"type": "text",
"value": "Diaries of Open Source. Part 5!",
"raw": "Diaries of Open Source. Part 5!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤯Contextual KTO Mistral PairRM: this model combines iterative KTO, SnorkelAI DPO dataset, Allenai PairRM for ranking, Mistral for the base model, and is a very strong model with Claude 3 quality on AlpacaEval 2.0",
"raw": "🤯Contextual KTO Mistral PairRM: this model combines iterative KTO, SnorkelAI DPO dataset, Allenai PairRM for ranking, Mistral for the base model, and is a very strong model with Claude 3 quality on AlpacaEval 2.0",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Final model: ",
"raw": "Final model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/ContextualAI/Contextual_KTO_Mistral_PairRM",
"href": null,
"resource": {
"type": "model",
"id": "ContextualAI/Contextual_KTO_Mistral_PairRM",
"discussionNum": null
},
"url": "https://hf.co/ContextualAI/Contextual_KTO_Mistral_PairRM",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Dataset: ",
"raw": "Dataset: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/datasets/snorkelai/Snorkel-Mistral-PairRM-DPO-Dataset",
"href": null,
"resource": {
"type": "dataset",
"id": "snorkelai/Snorkel-Mistral-PairRM-DPO-Dataset",
"discussionNum": null
},
"url": "https://hf.co/datasets/snorkelai/Snorkel-Mistral-PairRM-DPO-Dataset",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Leaderboard: ",
"raw": "Leaderboard: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://tatsu-lab.github.io/alpaca_eval/",
"href": "https://tatsu-lab.github.io/alpaca_eval/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Base model: ",
"raw": "Base model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/mistralai/Mistral-7B-Instruct-v0.2",
"href": null,
"resource": {
"type": "model",
"id": "mistralai/Mistral-7B-Instruct-v0.2",
"discussionNum": null
},
"url": "https://hf.co/mistralai/Mistral-7B-Instruct-v0.2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤏 tinyBenchmarks: Quick and cheap LLM evaluation!",
"raw": "🤏 tinyBenchmarks: Quick and cheap LLM evaluation!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Code: ",
"raw": "Code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/felipemaiapolo/tinyBenchmarks",
"href": "https://github.com/felipemaiapolo/tinyBenchmarks",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/papers/2402.14992",
"href": null,
"resource": {
"type": "paper",
"id": "2402.14992",
"discussionNum": null
},
"url": "https://hf.co/papers/2402.14992",
"code": null,
"user": null,
"label": "tinyBenchmarks: evaluating LLMs with fewer examples (2402.14992)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Data: ",
"raw": "Data: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/datasets/tinyBenchmarks/tinyMMLU",
"href": null,
"resource": {
"type": "dataset",
"id": "tinyBenchmarks/tinyMMLU",
"discussionNum": null
},
"url": "https://hf.co/datasets/tinyBenchmarks/tinyMMLU",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🎨Transformers.js 2.16 includes StableLM, speaker verification and diarization, and better chat templating. Try some fun demos!",
"raw": "🎨Transformers.js 2.16 includes StableLM, speaker verification and diarization, and better chat templating. Try some fun demos!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/spaces/Xenova/video-object-detection",
"href": null,
"resource": {
"type": "space",
"id": "Xenova/video-object-detection",
"discussionNum": null
},
"url": "https://hf.co/spaces/Xenova/video-object-detection",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/spaces/Xenova/cross-encoder-web",
"href": null,
"resource": {
"type": "space",
"id": "Xenova/cross-encoder-web",
"discussionNum": null
},
"url": "https://hf.co/spaces/Xenova/cross-encoder-web",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/spaces/Xenova/the-tokenizer-playground",
"href": null,
"resource": {
"type": "space",
"id": "Xenova/the-tokenizer-playground",
"discussionNum": null
},
"url": "https://hf.co/spaces/Xenova/the-tokenizer-playground",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏴☠️ Abascus Liberated-Qwen1.5-72B, a Qwen 72B-based model that strongly follows system prompts",
"raw": "🏴☠️ Abascus Liberated-Qwen1.5-72B, a Qwen 72B-based model that strongly follows system prompts",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/abacusai/Liberated-Qwen1.5-72B",
"href": null,
"resource": {
"type": "model",
"id": "abacusai/Liberated-Qwen1.5-72B",
"discussionNum": null
},
"url": "https://hf.co/abacusai/Liberated-Qwen1.5-72B",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👀Design2Code: benchmark of webpage screenshots to code",
"raw": "👀Design2Code: benchmark of webpage screenshots to code",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Data: ",
"raw": "Data: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/datasets/SALT-NLP/Design2Code",
"href": null,
"resource": {
"type": "dataset",
"id": "SALT-NLP/Design2Code",
"discussionNum": null
},
"url": "https://hf.co/datasets/SALT-NLP/Design2Code",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Project ",
"raw": "Project ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://salt-nlp.github.io/Design2Code/",
"href": "https://salt-nlp.github.io/Design2Code/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper ",
"raw": "Paper ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/papers/2403.03163",
"href": null,
"resource": {
"type": "paper",
"id": "2403.03163",
"discussionNum": null
},
"url": "https://hf.co/papers/2403.03163",
"code": null,
"user": null,
"label": "Design2Code: How Far Are We From Automating Front-End Engineering? (2403.03163)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌎Data and models around the world",
"raw": "🌎Data and models around the world",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- One of the biggest Italian datasets ",
"raw": "- One of the biggest Italian datasets ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://hf.co/datasets/manalog/UsenetArchiveIT",
"href": "https://hf.co/datasets/manalog/UsenetArchiveIT",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- IndicLLMSuite: argest Pre-training and Instruction Fine-tuning dataset collection across 22 Indic languages ",
"raw": "- IndicLLMSuite: argest Pre-training and Instruction Fine-tuning dataset collection across 22 Indic languages ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/collections/ai4bharat/indicllmsuite-65ee7d225c337fcfa0991707",
"href": null,
"resource": {
"type": "collection",
"id": "ai4bharat/indicllmsuite-65ee7d225c337fcfa0991707",
"discussionNum": null
},
"url": "https://hf.co/collections/ai4bharat/indicllmsuite-65ee7d225c337fcfa0991707",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Hebrew-Gemma-11B, the best base Hebrew model ",
"raw": "- Hebrew-Gemma-11B, the best base Hebrew model ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/yam-peleg/Hebrew-Gemma-11B",
"href": null,
"resource": {
"type": "model",
"id": "yam-peleg/Hebrew-Gemma-11B",
"discussionNum": null
},
"url": "https://huggingface.co/yam-peleg/Hebrew-Gemma-11B",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Komodo-7B, a family of multiple Indonesian languages LLMs ",
"raw": "- Komodo-7B, a family of multiple Indonesian languages LLMs ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/Yellow-AI-NLP/komodo-7b-base",
"href": null,
"resource": {
"type": "model",
"id": "Yellow-AI-NLP/komodo-7b-base",
"discussionNum": null
},
"url": "https://hf.co/Yellow-AI-NLP/komodo-7b-base",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can find the previous part at ",
"raw": "You can find the previous part at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/posts/osanseviero/127895284909100",
"href": "https://huggingface.co/posts/osanseviero/127895284909100",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Diaries of Open Source. Part 5!
🤯Contextual KTO Mistral PairRM: this model combines iterative KTO, SnorkelAI DPO dataset, Allenai PairRM for ranking, Mistral for the base model, and is a very strong model with Claude 3 quality on AlpacaEval 2.0
Final model: https://hf.co/ContextualAI/Contextual_KTO_Mistral_PairRM
Dataset: https://hf.co/datasets/snorkelai/Snorkel-Mistral-PairRM-DPO-Dataset
Leaderboard: https://tatsu-lab.github.io/alpaca_eval/
Base model: https://hf.co/mistralai/Mistral-7B-Instruct-v0.2
🤏 tinyBenchmarks: Quick and cheap LLM evaluation!
Code: https://github.com/felipemaiapolo/tinyBenchmarks
Paper: https://hf.co/papers/2402.14992
Data: https://hf.co/datasets/tinyBenchmarks/tinyMMLU
🎨Transformers.js 2.16 includes StableLM, speaker verification and diarization, and better chat templating. Try some fun demos!
- https://hf.co/spaces/Xenova/video-object-detection
- https://hf.co/spaces/Xenova/cross-encoder-web
- https://hf.co/spaces/Xenova/the-tokenizer-playground
🏴☠️ Abascus Liberated-Qwen1.5-72B, a Qwen 72B-based model that strongly follows system prompts
Model: https://hf.co/abacusai/Liberated-Qwen1.5-72B
👀Design2Code: benchmark of webpage screenshots to code
Data: https://hf.co/datasets/SALT-NLP/Design2Code
Project https://salt-nlp.github.io/Design2Code/
Paper https://hf.co/papers/2403.03163
🌎Data and models around the world
- One of the biggest Italian datasets https://hf.co/datasets/manalog/UsenetArchiveIT
- IndicLLMSuite: argest Pre-training and Instruction Fine-tuning dataset collection across 22 Indic languages https://hf.co/collections/ai4bharat/indicllmsuite-65ee7d225c337fcfa0991707
- Hebrew-Gemma-11B, the best base Hebrew model https://huggingface.co/yam-peleg/Hebrew-Gemma-11B
- Komodo-7B, a family of multiple Indonesian languages LLMs https://hf.co/Yellow-AI-NLP/komodo-7b-base
You can find the previous part at https://huggingface.co/posts/osanseviero/127895284909100 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"DmitryRyumin",
"alielfilali01",
"mikonvergence",
"merve",
"samusenps",
"ajibawa-2023",
"pcuenq",
"SSamDav",
"victor",
"Stopwolf"
],
"count": 10
},
{
"reaction": "👀",
"users": [
"Jaward",
"LucasWeber"
],
"count": 2
}
] | 2024-03-15T12:04:28.000Z | 2024-03-15T12:04:28.066Z | [] | /posts/osanseviero/853315170369291 | 722 | 0 |
623907162608854 | [
{
"type": "text",
"value": "Komodo-7B is here !! Today we are releasing the base version of Komodo-7B along with the technical report. ",
"raw": "Komodo-7B is here !! Today we are releasing the base version of Komodo-7B along with the technical report. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Komodo-7B is a family of LLMs that consist of Komodo-7B-Base and Komodo-7B-Instruct.",
"raw": "Komodo-7B is a family of LLMs that consist of Komodo-7B-Base and Komodo-7B-Instruct.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Komodo-7B performers really good in multiple Indonesian languages including Indonesian, Acehnese, Balinese, Banjarese, Buginese, Dayak Ngaju, Javanese, Lampungnese, Madurese, Minangkabau, Sundanese, and Toba Batak. ",
"raw": "Komodo-7B performers really good in multiple Indonesian languages including Indonesian, Acehnese, Balinese, Banjarese, Buginese, Dayak Ngaju, Javanese, Lampungnese, Madurese, Minangkabau, Sundanese, and Toba Batak. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Our model outperforms various existing large language models including some multilingual models.",
"raw": "Our model outperforms various existing large language models including some multilingual models.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Technical Report: ",
"raw": "Technical Report: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2403.09362",
"href": "https://arxiv.org/abs/2403.09362",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Base Model HuggingFace: ",
"raw": "Base Model HuggingFace: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/Yellow-AI-NLP/komodo-7b-base",
"href": null,
"resource": {
"type": "model",
"id": "Yellow-AI-NLP/komodo-7b-base",
"discussionNum": null
},
"url": "https://huggingface.co/Yellow-AI-NLP/komodo-7b-base",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Kudos to the team ",
"raw": "Kudos to the team ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@louisowen6",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "louisowen6",
"label": null,
"lang": null
},
{
"type": "text",
"value": " , ",
"raw": " , ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@akanyaani",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "akanyaani",
"label": null,
"lang": null
},
{
"type": "text",
"value": " & ",
"raw": " & ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@biddwan",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "biddwan",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.09362",
"href": null,
"resource": {
"type": "paper",
"id": "2403.09362",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.09362",
"code": null,
"user": null,
"label": "Komodo: A Linguistic Expedition into Indonesia's Regional Languages (2403.09362)",
"lang": null
}
] | Komodo-7B is here !! Today we are releasing the base version of Komodo-7B along with the technical report.
Komodo-7B is a family of LLMs that consist of Komodo-7B-Base and Komodo-7B-Instruct.
Komodo-7B performers really good in multiple Indonesian languages including Indonesian, Acehnese, Balinese, Banjarese, Buginese, Dayak Ngaju, Javanese, Lampungnese, Madurese, Minangkabau, Sundanese, and Toba Batak.
Our model outperforms various existing large language models including some multilingual models.
Technical Report: https://arxiv.org/abs/2403.09362
Base Model HuggingFace: https://huggingface.co/Yellow-AI-NLP/komodo-7b-base
Kudos to the team @louisowen6 , @akanyaani & @biddwan https://huggingface.co/papers/2403.09362 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/638828121901766b88076aa1/rXlOO7eewmmaSN_hQIVz7.jpeg",
"fullname": "Vishesh Tripathi",
"name": "vishesh-t27",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 7,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/638828121901766b88076aa1/2yDjR4HEhEhFiz18KyHRg.jpeg"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62cd4b03c5cc157be82f0b56/aSbFUqq85kVx-jlmk_3-_.jpeg",
"fullname": "Abhay kumar",
"name": "akanyaani",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "/avatars/b4356003b7673f3d259ae413e6451dd4.svg",
"fullname": "Biddwan Ahmed",
"name": "biddwan",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6071c4b270e11b30cfcfd7a3/-1ekCBzSTpqxkkul0bgmI.jpeg",
"fullname": "Louis Owen",
"name": "louisowen6",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5
}
] | [
{
"reaction": "🔥",
"users": [
"louisowen6",
"Kernel",
"biddwan",
"samusenps",
"osanseviero",
"kramp",
"hoangdev1104",
"merve",
"victor",
"taufiqdp",
"loubnabnl"
],
"count": 11
},
{
"reaction": "🚀",
"users": [
"louisowen6",
"biddwan",
"merve"
],
"count": 3
},
{
"reaction": "🤗",
"users": [
"VitorJose",
"Andrianalx113"
],
"count": 2
}
] | 2024-03-15T04:39:19.000Z | 2024-03-15T04:39:45.466Z | [] | /posts/vishesh-t27/623907162608854 | 570 | 0 |
117997091400234 | [
{
"type": "text",
"value": "We released 🧨 Diffusers 0.27.0, and it's a versatile release 💫",
"raw": "We released 🧨 Diffusers 0.27.0, and it's a versatile release 💫",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Among other things, we shipped:",
"raw": "Among other things, we shipped:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Stable Cascade",
"raw": "* Stable Cascade",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Playground v2.5 and EDM-style training",
"raw": "* Playground v2.5 and EDM-style training",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* EDM-formulated schedulers ",
"raw": "* EDM-formulated schedulers ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Trajectory Consistency Distillation for accelerated sampling ",
"raw": "* Trajectory Consistency Distillation for accelerated sampling ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* A new guide on merging LoRAs",
"raw": "* A new guide on merging LoRAs",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* A new image editing pipeline -- LEDITS++",
"raw": "* A new image editing pipeline -- LEDITS++",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check out the release notes to catch everything that went into the release",
"raw": "Check out the release notes to catch everything that went into the release",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/diffusers/releases/tag/v0.27.0",
"href": "https://github.com/huggingface/diffusers/releases/tag/v0.27.0",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Thanks to everyone that contributed to the release 🤗",
"raw": "Thanks to everyone that contributed to the release 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | We released 🧨 Diffusers 0.27.0, and it's a versatile release 💫
Among other things, we shipped:
* Stable Cascade
* Playground v2.5 and EDM-style training
* EDM-formulated schedulers
* Trajectory Consistency Distillation for accelerated sampling
* A new guide on merging LoRAs
* A new image editing pipeline -- LEDITS++
Check out the release notes to catch everything that went into the release
https://github.com/huggingface/diffusers/releases/tag/v0.27.0
Thanks to everyone that contributed to the release 🤗 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649681653581-5f7fbd813e94f16a85448745.jpeg",
"fullname": "Sayak Paul",
"name": "sayakpaul",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 459,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"DmitryRyumin",
"osanseviero",
"kramp",
"mvaloatto",
"AdinaY",
"fffiloni",
"merve"
],
"count": 7
},
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"louisbrulenaudet",
"AndroidAttack"
],
"count": 4
}
] | 2024-03-15T02:59:39.000Z | 2024-03-15T02:59:39.078Z | [] | /posts/sayakpaul/117997091400234 | 652 | 0 |
694895364628283 | [
{
"type": "text",
"value": "\"Follow-Your-Click: Open-domain Regional Image Animation via Short Prompts\" is a new framework designed to animate specific regions within an image through user inputs. ",
"raw": "\"Follow-Your-Click: Open-domain Regional Image Animation via Short Prompts\" is a new framework designed to animate specific regions within an image through user inputs. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Key points:",
"raw": "Key points:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Enables precise animation of selected image regions with just a user click and a concise motion description.",
"raw": "* Enables precise animation of selected image regions with just a user click and a concise motion description.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Achieves promising results for generating localized animations.",
"raw": "* Achieves promising results for generating localized animations.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.08268",
"href": null,
"resource": {
"type": "paper",
"id": "2403.08268",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.08268",
"code": null,
"user": null,
"label": "Follow-Your-Click: Open-domain Regional Image Animation via Short\n Prompts (2403.08268)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Congrats to the authors for their work!",
"raw": "Congrats to the authors for their work!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | "Follow-Your-Click: Open-domain Regional Image Animation via Short Prompts" is a new framework designed to animate specific regions within an image through user inputs.
Key points:
* Enables precise animation of selected image regions with just a user click and a concise motion description.
* Achieves promising results for generating localized animations.
Paper: https://huggingface.co/papers/2403.08268
Congrats to the authors for their work! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg",
"fullname": "Vlad Bogolin",
"name": "vladbogo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 109,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/mAMVfUYPutho7M_2qu8MS.qt"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/vsAfUWJcMrhxVQ12rfG8o.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/uOpfpHbY_UXrgJBw1FJTE.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"merterbak",
"samusenps",
"xsmq",
"DmitryRyumin",
"merve",
"victor"
],
"count": 6
}
] | 2024-03-14T22:49:34.000Z | 2024-03-14T22:49:34.710Z | [] | /posts/vladbogo/694895364628283 | 518 | 0 |
695838392164690 | [
{
"type": "text",
"value": "Huggingface is carrying the AI open source ecosystem ",
"raw": "Huggingface is carrying the AI open source ecosystem ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huyenchip.com/2024/03/14/ai-oss.html",
"href": "https://huyenchip.com/2024/03/14/ai-oss.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Huggingface is carrying the AI open source ecosystem https://huyenchip.com/2024/03/14/ai-oss.html | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6011db60aa32e5620759af6d/sjrkUXJA_EZAtxtdAhkJR.jpeg",
"fullname": "Chip Huyen",
"name": "chiphuyen",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 98,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6011db60aa32e5620759af6d/UcboK-zhJdQt_qZtoND1v.png"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"osanseviero",
"samusenps",
"nikilpatel94",
"julien-c",
"lewtun",
"lvwerra",
"merve",
"loubnabnl",
"AdinaY",
"joaogante",
"hanguyen146",
"RaushanTurganbay",
"thomwolf",
"andrewrreed",
"xianbao",
"nezubn",
"ajibawa-2023",
"beberik",
"pcuenq",
"LucienL",
"michaelbenayoun",
"rafaelpierrehf",
"Srulikbd"
],
"count": 23
},
{
"reaction": "❤️",
"users": [
"osanseviero",
"Zyn123",
"lewtun",
"merve",
"AdinaY",
"joaogante",
"RaushanTurganbay",
"thomwolf",
"andrewrreed",
"sayakpaul",
"xianbao",
"evdcush",
"pcuenq",
"LucienL",
"michaelbenayoun",
"rafaelpierrehf",
"jeffboudier",
"Bkarine"
],
"count": 18
},
{
"reaction": "🤗",
"users": [
"osanseviero",
"kramp",
"lewtun",
"merve",
"loubnabnl",
"AdinaY",
"RaushanTurganbay",
"thomwolf",
"andrewrreed",
"LucienL",
"michaelbenayoun",
"rafaelpierrehf",
"jeffboudier"
],
"count": 13
}
] | 2024-03-14T22:02:04.000Z | 2024-03-15T14:54:46.722Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg",
"fullname": "Julien Chaumond",
"name": "julien-c",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1580,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg",
"fullname": "Vaibhav Srivastav",
"name": "reach-vb",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 460,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63a369d98c0c89dcae3b8329/6OUJ7Hc9T1jXynYH3FGaf.png",
"fullname": "Adina Yakefu",
"name": "AdinaY",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 240,
"isFollowing": false
}
] | /posts/chiphuyen/695838392164690 | 722 | 4 |
268972860465429 | [
{
"type": "text",
"value": "Simple and Scalable Strategies to Continually Pre-train Large Language Models",
"raw": "Simple and Scalable Strategies to Continually Pre-train Large Language Models",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.08763",
"href": null,
"resource": {
"type": "paper",
"id": "2403.08763",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.08763",
"code": null,
"user": null,
"label": "Simple and Scalable Strategies to Continually Pre-train Large Language\n Models (2403.08763)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Large language models (LLMs) are routinely pre-trained on billions of tokens, only to start the process over again once new data becomes available. A much more efficient solution is to continually pre-train these models, saving significant compute compared to re-training. However, the distribution shift induced by new data typically results in degraded performance on previous data or poor adaptation to the new data. In this work, we show that a simple and scalable combination of learning rate (LR) re-warming, LR re-decaying, and replay of previous data is sufficient to match the performance of fully re-training from scratch on all available data, as measured by final loss and language model (LM) evaluation benchmarks. Specifically, we show this for a weak but realistic distribution shift between two commonly used LLM pre-training datasets (EnglishrightarrowEnglish) and a stronger distribution shift (EnglishrightarrowGerman) at the 405M parameter model scale with large dataset sizes (hundreds of billions of tokens). Selecting the weak but realistic shift for larger-scale experiments, we also find that our continual learning strategies match the re-training baseline for a 10B parameter LLM. Our results demonstrate that LLMs can be successfully updated via simple and scalable continual learning strategies, matching the re-training baseline using only a fraction of the compute. Finally, inspired by previous work, we propose alternatives to the cosine learning rate schedule that help circumvent forgetting induced by LR re-warming and that are not bound to a fixed token budget.",
"raw": "Large language models (LLMs) are routinely pre-trained on billions of tokens, only to start the process over again once new data becomes available. A much more efficient solution is to continually pre-train these models, saving significant compute compared to re-training. However, the distribution shift induced by new data typically results in degraded performance on previous data or poor adaptation to the new data. In this work, we show that a simple and scalable combination of learning rate (LR) re-warming, LR re-decaying, and replay of previous data is sufficient to match the performance of fully re-training from scratch on all available data, as measured by final loss and language model (LM) evaluation benchmarks. Specifically, we show this for a weak but realistic distribution shift between two commonly used LLM pre-training datasets (EnglishrightarrowEnglish) and a stronger distribution shift (EnglishrightarrowGerman) at the 405M parameter model scale with large dataset sizes (hundreds of billions of tokens). Selecting the weak but realistic shift for larger-scale experiments, we also find that our continual learning strategies match the re-training baseline for a 10B parameter LLM. Our results demonstrate that LLMs can be successfully updated via simple and scalable continual learning strategies, matching the re-training baseline using only a fraction of the compute. Finally, inspired by previous work, we propose alternatives to the cosine learning rate schedule that help circumvent forgetting induced by LR re-warming and that are not bound to a fixed token budget.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Simple and Scalable Strategies to Continually Pre-train Large Language Models
https://huggingface.co/papers/2403.08763
Large language models (LLMs) are routinely pre-trained on billions of tokens, only to start the process over again once new data becomes available. A much more efficient solution is to continually pre-train these models, saving significant compute compared to re-training. However, the distribution shift induced by new data typically results in degraded performance on previous data or poor adaptation to the new data. In this work, we show that a simple and scalable combination of learning rate (LR) re-warming, LR re-decaying, and replay of previous data is sufficient to match the performance of fully re-training from scratch on all available data, as measured by final loss and language model (LM) evaluation benchmarks. Specifically, we show this for a weak but realistic distribution shift between two commonly used LLM pre-training datasets (EnglishrightarrowEnglish) and a stronger distribution shift (EnglishrightarrowGerman) at the 405M parameter model scale with large dataset sizes (hundreds of billions of tokens). Selecting the weak but realistic shift for larger-scale experiments, we also find that our continual learning strategies match the re-training baseline for a 10B parameter LLM. Our results demonstrate that LLMs can be successfully updated via simple and scalable continual learning strategies, matching the re-training baseline using only a fraction of the compute. Finally, inspired by previous work, we propose alternatives to the cosine learning rate schedule that help circumvent forgetting induced by LR re-warming and that are not bound to a fixed token budget. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/EzZ6XatruNlqhrFG4sqKO.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"boapps",
"osanseviero",
"Kernel",
"neerajjulka",
"RachidAR",
"dolemole",
"anthonyivn",
"rnella01",
"PcHome20242",
"TangoHumanity",
"louisbrulenaudet"
],
"count": 12
},
{
"reaction": "👀",
"users": [
"bshada",
"pietrolesci"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"aswathitsme"
],
"count": 1
},
{
"reaction": "🤗",
"users": [
"RichardForests"
],
"count": 1
}
] | 2024-03-14T15:08:59.000Z | 2024-03-14T15:08:59.001Z | [] | /posts/akhaliq/268972860465429 | 788 | 0 |
354334873318056 | [
{
"type": "text",
"value": "I've released several new Hugging Face Spaces. ",
"raw": "I've released several new Hugging Face Spaces. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "My primary objective is to create consistent character facial animation using image-to-image techniques:",
"raw": "My primary objective is to create consistent character facial animation using image-to-image techniques:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Akjava/CreateConsistentCharacterFacialAnimationWithImg2Img",
"href": null,
"resource": {
"type": "space",
"id": "Akjava/CreateConsistentCharacterFacialAnimationWithImg2Img",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Akjava/CreateConsistentCharacterFacialAnimationWithImg2Img",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "A short-term goal is create simple talk-head animation.",
"raw": "A short-term goal is create simple talk-head animation.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "WebP-3-Frame-Talking-Animation",
"raw": "WebP-3-Frame-Talking-Animation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Akjava/AIDiagramChatWithVoice-FaceCharacter",
"href": null,
"resource": {
"type": "space",
"id": "Akjava/AIDiagramChatWithVoice-FaceCharacter",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Akjava/AIDiagramChatWithVoice-FaceCharacter",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "[Space]",
"raw": "[Space]",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- GPU tools",
"raw": "- GPU tools",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Flux1-schnell img2img",
"raw": "Flux1-schnell img2img",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Akjava/flux1-schnell-img2img",
"href": null,
"resource": {
"type": "space",
"id": "Akjava/flux1-schnell-img2img",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Akjava/flux1-schnell-img2img",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Flux1-schnell Inpaint with mask-file",
"raw": "Flux1-schnell Inpaint with mask-file",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Akjava/flux1-schnell-img2img",
"href": null,
"resource": {
"type": "space",
"id": "Akjava/flux1-schnell-img2img",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Akjava/flux1-schnell-img2img",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " - Tiny CPU tools",
"raw": " - Tiny CPU tools",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "WebP-3F-TH - create webp animation from 3 images",
"raw": "WebP-3F-TH - create webp animation from 3 images",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "OpenCV-Inapint - classic inpaint",
"raw": "OpenCV-Inapint - classic inpaint",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Whitebalance - simple white balance",
"raw": "Whitebalance - simple white balance",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paste Image - just paste image with mask",
"raw": "Paste Image - just paste image with mask",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "WebP Resize Convert - resize and convert webp-animation ",
"raw": "WebP Resize Convert - resize and convert webp-animation ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I've released several new Hugging Face Spaces.
My primary objective is to create consistent character facial animation using image-to-image techniques:
https://huggingface.co/spaces/Akjava/CreateConsistentCharacterFacialAnimationWithImg2Img
A short-term goal is create simple talk-head animation.
WebP-3-Frame-Talking-Animation
https://huggingface.co/spaces/Akjava/AIDiagramChatWithVoice-FaceCharacter
[Space]
- GPU tools
Flux1-schnell img2img
https://huggingface.co/spaces/Akjava/flux1-schnell-img2img
Flux1-schnell Inpaint with mask-file
https://huggingface.co/spaces/Akjava/flux1-schnell-img2img
- Tiny CPU tools
WebP-3F-TH - create webp animation from 3 images
OpenCV-Inapint - classic inpaint
Whitebalance - simple white balance
Paste Image - just paste image with mask
WebP Resize Convert - resize and convert webp-animation
| {
"avatarUrl": "/avatars/fb866e3758189d70488fc6a879151f45.svg",
"fullname": "Akihito Miyazaki",
"name": "Akjava",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 4,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-03T23:47:03.000Z | 2024-11-03T23:47:03.131Z | [] | /posts/Akjava/354334873318056 | 697 | 0 |
153695739796256 | [
{
"type": "text",
"value": "We are happy to introduce MedIT SUN 1B, a downscaled version of the MedIT SUN 2.5B Llama 3.2 variant.",
"raw": "We are happy to introduce MedIT SUN 1B, a downscaled version of the MedIT SUN 2.5B Llama 3.2 variant.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Give it a try!",
"raw": "Give it a try!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/meditsolutions/Llama-3.2-SUN-1B-chat",
"href": null,
"resource": {
"type": "model",
"id": "meditsolutions/Llama-3.2-SUN-1B-chat",
"discussionNum": null
},
"url": "https://huggingface.co/meditsolutions/Llama-3.2-SUN-1B-chat",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | We are happy to introduce MedIT SUN 1B, a downscaled version of the MedIT SUN 2.5B Llama 3.2 variant.
Give it a try!
https://huggingface.co/meditsolutions/Llama-3.2-SUN-1B-chat | {
"avatarUrl": "/avatars/4fe71fbf6a7aa19380e38345b9de9d04.svg",
"fullname": "Mariusz Kurman",
"name": "mkurman",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-03T20:34:28.000Z | 2024-11-03T20:34:37.873Z | [] | /posts/mkurman/153695739796256 | 686 | 0 |
353004317978723 | [
{
"type": "text",
"value": "Do you guys want to see my training code for ",
"raw": "Do you guys want to see my training code for ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/nroggendorff/smallama",
"href": null,
"resource": {
"type": "model",
"id": "nroggendorff/smallama",
"discussionNum": null
},
"url": "https://huggingface.co/nroggendorff/smallama",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ?",
"raw": " ?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Do you guys want to see my training code for https://huggingface.co/nroggendorff/smallama ? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png",
"fullname": "Noa Roggendorff",
"name": "nroggendorff",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 141,
"isFollowing": false
} | [] | [] | [
{
"reaction": "😎",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-03T20:04:09.000Z | 2024-11-04T13:54:09.503Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png",
"fullname": "Noa Roggendorff",
"name": "nroggendorff",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 141,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png",
"fullname": "John Smith",
"name": "John6666",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 398,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
}
] | /posts/nroggendorff/353004317978723 | 627 | 3 |
941309448600578 | [
{
"type": "text",
"value": "Imagine being able to talk directly to your API connection. \"I have a field in the CRM named Customer_ID that needs to map to a field in the ERP named ERP_Customer_ID.\" Imagine being able to give your API connections both a brain and swarm of agents as a body to execute any task or function. This isn't science fiction, this is the revolutionary power of Liquid API. A product 10 years in the making!",
"raw": "Imagine being able to talk directly to your API connection. \"I have a field in the CRM named Customer_ID that needs to map to a field in the ERP named ERP_Customer_ID.\" Imagine being able to give your API connections both a brain and swarm of agents as a body to execute any task or function. This isn't science fiction, this is the revolutionary power of Liquid API. A product 10 years in the making!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://youtu.be/cHI_k1Dkdr4",
"href": "https://youtu.be/cHI_k1Dkdr4",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Imagine being able to talk directly to your API connection. "I have a field in the CRM named Customer_ID that needs to map to a field in the ERP named ERP_Customer_ID." Imagine being able to give your API connections both a brain and swarm of agents as a body to execute any task or function. This isn't science fiction, this is the revolutionary power of Liquid API. A product 10 years in the making!
https://youtu.be/cHI_k1Dkdr4
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/6FvfD9bfR9oHm10LvOgRH.jpeg"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"HamedEmine"
],
"count": 2
}
] | 2024-11-03T18:50:51.000Z | 2024-11-04T18:20:45.252Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1653051419389-62878fdc70af5d9106e3e892.png",
"fullname": "K S",
"name": "MultiTrickFox",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 5,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
}
] | /posts/TuringsSolutions/941309448600578 | 577 | 2 |
555798537911917 | [
{
"type": "text",
"value": "New Mann-E model just released:",
"raw": "New Mann-E model just released:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/mann-e/mann-e_flux",
"href": null,
"resource": {
"type": "model",
"id": "mann-e/mann-e_flux",
"discussionNum": null
},
"url": "https://huggingface.co/mann-e/mann-e_flux",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I will be glad if you test it!",
"raw": "I will be glad if you test it!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | New Mann-E model just released:
https://huggingface.co/mann-e/mann-e_flux
I will be glad if you test it! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/637251142f98dcc049b349de/kkRLjyaO55_nFrTNWRZFQ.jpeg",
"fullname": "Haghiri",
"name": "Muhammadreza",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 26,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-03T16:44:25.000Z | 2024-11-03T16:44:25.016Z | [] | /posts/Muhammadreza/555798537911917 | 503 | 0 |
342387295885636 | [
{
"type": "text",
"value": "hi everyone,",
"raw": "hi everyone,",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "i have trained a Qwen 14b model on a smaller dataset, but its now very tricky because i have got nowhere to use it via inference (the paid for inference on hf costs quite a lot), does anyone know of anywhere where i can deploy my model and use it via api for a reasonable cost, or ideally none. thanks",
"raw": "i have trained a Qwen 14b model on a smaller dataset, but its now very tricky because i have got nowhere to use it via inference (the paid for inference on hf costs quite a lot), does anyone know of anywhere where i can deploy my model and use it via api for a reasonable cost, or ideally none. thanks",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | hi everyone,
i have trained a Qwen 14b model on a smaller dataset, but its now very tricky because i have got nowhere to use it via inference (the paid for inference on hf costs quite a lot), does anyone know of anywhere where i can deploy my model and use it via api for a reasonable cost, or ideally none. thanks | {
"avatarUrl": "/avatars/7be1913712fdd1ffe75967ed19007720.svg",
"fullname": "stock mining",
"name": "automatedstockminingorg",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 10,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"robertomachorro",
"hakutaku",
"victor"
],
"count": 4
}
] | 2024-11-03T08:10:19.000Z | 2024-11-04T12:14:05.992Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png",
"fullname": "John Smith",
"name": "John6666",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 398,
"isFollowing": false
},
{
"avatarUrl": "/avatars/1c2788196f8786f8fc259e60403a64f5.svg",
"fullname": "Jelle De Loecker",
"name": "skerit",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
},
{
"avatarUrl": "/avatars/c0d7fc43144c8ec3ca2aac1cef0d6f98.svg",
"fullname": "Jack Smith",
"name": "hakutaku",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6374bb2119c264fe6fb3153c/sE9OAyFexJkGoWea_8Oy_.png",
"fullname": "Nyaribari Reuben",
"name": "foscraft",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/5565505abdd4ab3dbc958c9e63ba12ff.svg",
"fullname": "Simoes",
"name": "joaomsimoes",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/automatedstockminingorg/342387295885636 | 2,347 | 6 |
350161263239420 | [
{
"type": "text",
"value": "LLaMA-O1: Open Large Reasoning Model Frameworks For Training, Inference and Evaluation With PyTorch and HuggingFace",
"raw": "LLaMA-O1: Open Large Reasoning Model Frameworks For Training, Inference and Evaluation With PyTorch and HuggingFace",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Large Reasoning Models powered by Monte Carlo Tree Search (MCTS), Self-Play Reinforcement Learning, PPO, AlphaGo Zero's dua policy paradigm and Large Language Models! ",
"raw": "Large Reasoning Models powered by Monte Carlo Tree Search (MCTS), Self-Play Reinforcement Learning, PPO, AlphaGo Zero's dua policy paradigm and Large Language Models! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/SimpleBerry/LLaMA-O1/",
"href": "https://github.com/SimpleBerry/LLaMA-O1/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "What will happen when you compound MCTS ❤ LLM ❤ Self-Play ❤RLHF?",
"raw": "What will happen when you compound MCTS ❤ LLM ❤ Self-Play ❤RLHF?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Just a little bite of strawberry!🍓",
"raw": "Just a little bite of strawberry!🍓",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Past related works:",
"raw": "Past related works:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2410.02884",
"href": null,
"resource": {
"type": "paper",
"id": "2410.02884",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2410.02884",
"code": null,
"user": null,
"label": "LLaMA-Berry: Pairwise Optimization for O1-like Olympiad-Level\n Mathematical Reasoning (2410.02884)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2406.07394",
"href": null,
"resource": {
"type": "paper",
"id": "2406.07394",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2406.07394",
"code": null,
"user": null,
"label": "Accessing GPT-4 level Mathematical Olympiad Solutions via Monte Carlo\n Tree Self-refine with LLaMa-3 8B (2406.07394)",
"lang": null
}
] | LLaMA-O1: Open Large Reasoning Model Frameworks For Training, Inference and Evaluation With PyTorch and HuggingFace
Large Reasoning Models powered by Monte Carlo Tree Search (MCTS), Self-Play Reinforcement Learning, PPO, AlphaGo Zero's dua policy paradigm and Large Language Models!
https://github.com/SimpleBerry/LLaMA-O1/
What will happen when you compound MCTS ❤ LLM ❤ Self-Play ❤RLHF?
Just a little bite of strawberry!🍓
Past related works:
https://huggingface.co/papers/2410.02884
https://huggingface.co/papers/2406.07394 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64bce15bafd1e46c5504ad38/bQFX1iFbXEBXcQvUNL811.png",
"fullname": "Di Zhang",
"name": "qq8933",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 108,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64bce15bafd1e46c5504ad38/mrGEDFPp9QC7jZ7cOXBVH.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64bce15bafd1e46c5504ad38/b859bDNIaOVTFjif1f7cU.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"iwancobain",
"John6666",
"zsqzz",
"jwu323",
"ALYTV",
"Azzedde",
"Svngoku",
"timmylai",
"nbroad",
"csabakecskemeti",
"Syzygianinfern0",
"sekkit",
"flozi00",
"qftop",
"victor",
"ajibawa-2023",
"seyf1elislam",
"KvrParaskevi",
"dingo-actual",
"createtheimaginable",
"ai-everyday"
],
"count": 21
},
{
"reaction": "🔥",
"users": [
"prithivMLmods",
"createtheimaginable",
"jwu323"
],
"count": 3
}
] | 2024-11-03T02:03:27.000Z | 2024-11-05T05:20:37.552Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/MNuArctG3OwNdey9j44Os.jpeg",
"fullname": "Paraskevi Kivroglou",
"name": "KvrParaskevi",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 6,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64bce15bafd1e46c5504ad38/bQFX1iFbXEBXcQvUNL811.png",
"fullname": "Di Zhang",
"name": "qq8933",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 108,
"isFollowing": false
}
] | /posts/qq8933/350161263239420 | 5,586 | 2 |
757664580544837 | [
{
"type": "text",
"value": "Hi there HuggingFacers!🤗",
"raw": "Hi there HuggingFacers!🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Are you working with Streamlit on Spaces and struggling with authentication and user management?🧐",
"raw": "Are you working with Streamlit on Spaces and struggling with authentication and user management?🧐",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Well, you can check out my last community article (",
"raw": "Well, you can check out my last community article (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/as-cle-bert/streamlit-supabase-auth-ui",
"href": "https://huggingface.co/blog/as-cle-bert/streamlit-supabase-auth-ui",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ") on a new python package I've been working on, that connects Supabase to Streamlit UI, in order to create a seamless authentication for your seamless Streamlit apps!🚀",
"raw": ") on a new python package I've been working on, that connects Supabase to Streamlit UI, in order to create a seamless authentication for your seamless Streamlit apps!🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can find a demo of it on Spaces: ",
"raw": "You can find a demo of it on Spaces: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/as-cle-bert/streamlit-supabase-auth-ui",
"href": null,
"resource": {
"type": "space",
"id": "as-cle-bert/streamlit-supabase-auth-ui",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/as-cle-bert/streamlit-supabase-auth-ui",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Have fun!🍕",
"raw": "Have fun!🍕",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hi there HuggingFacers!🤗
Are you working with Streamlit on Spaces and struggling with authentication and user management?🧐
Well, you can check out my last community article (https://huggingface.co/blog/as-cle-bert/streamlit-supabase-auth-ui) on a new python package I've been working on, that connects Supabase to Streamlit UI, in order to create a seamless authentication for your seamless Streamlit apps!🚀
You can find a demo of it on Spaces: https://huggingface.co/spaces/as-cle-bert/streamlit-supabase-auth-ui
Have fun!🍕 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e330e7edc2f7306e252448/ucpk9c8x0UafGM4mXTrRy.jpeg",
"fullname": "Astra Clelia Bertelli",
"name": "as-cle-bert",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 650,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65e330e7edc2f7306e252448/R2Nu4rNbJB-lBe7wQaQoZ.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-03T01:53:16.000Z | 2024-11-03T01:53:16.219Z | [] | /posts/as-cle-bert/757664580544837 | 754 | 0 |
624245127298035 | [
{
"type": "text",
"value": "OmniGen 1-Click Automatic Installers for Windows, RunPod and Massed Compute",
"raw": "OmniGen 1-Click Automatic Installers for Windows, RunPod and Massed Compute",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "OmniGen is a unified image generation model that can generate a wide range of images from multi-modal prompts. It is designed to be simple, flexible, and easy to use",
"raw": "OmniGen is a unified image generation model that can generate a wide range of images from multi-modal prompts. It is designed to be simple, flexible, and easy to use",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Installers are here : ",
"raw": "Installers are here : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/omnigen-1-click-115233922",
"href": "https://www.patreon.com/posts/omnigen-1-click-115233922",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Look attached images to understand what capabilities it has. It is simply amazing so many features.",
"raw": "Look attached images to understand what capabilities it has. It is simply amazing so many features.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "What is OmniGen : ",
"raw": "What is OmniGen : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/VectorSpaceLab/OmniGen",
"href": "https://github.com/VectorSpaceLab/OmniGen",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Windows Requirements",
"raw": "Windows Requirements",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Python 3.10.11, CUDA 12.4, Git, FFMPEG, cuDNN 9.x, C++ Tools",
"raw": "Python 3.10.11, CUDA 12.4, Git, FFMPEG, cuDNN 9.x, C++ Tools",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "A tutorial that shows how to install all above : ",
"raw": "A tutorial that shows how to install all above : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://youtu.be/DrhUHnYfwC0",
"href": "https://youtu.be/DrhUHnYfwC0",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "How To Install & Use",
"raw": "How To Install & Use",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "After installing requirements by following above tutorial, double-click Windows_Install.bat and install",
"raw": "After installing requirements by following above tutorial, double-click Windows_Install.bat and install",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "After that use Windows_Start.bat to start the app",
"raw": "After that use Windows_Start.bat to start the app",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "When offload_model is enabled (checked) on the Gradio interface, it uses 5.4 GB VRAM, 2x slower",
"raw": "When offload_model is enabled (checked) on the Gradio interface, it uses 5.4 GB VRAM, 2x slower",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "When offload_model is not used (not checked) it uses 12.2 GB VRAM",
"raw": "When offload_model is not used (not checked) it uses 12.2 GB VRAM",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "When separate_cfg_infer is not checked, and offload_model is not checked, it uses 18.7 GB VRAM",
"raw": "When separate_cfg_infer is not checked, and offload_model is not checked, it uses 18.7 GB VRAM",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "To install on RunPod and Massed Compute please follow Massed_Compute_Instructions_READ.txt and Runpod_Instructions_READ.txt",
"raw": "To install on RunPod and Massed Compute please follow Massed_Compute_Instructions_READ.txt and Runpod_Instructions_READ.txt",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Look at the examples on the Gradio interface closely to understand how to use",
"raw": "Look at the examples on the Gradio interface closely to understand how to use",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | OmniGen 1-Click Automatic Installers for Windows, RunPod and Massed Compute
OmniGen is a unified image generation model that can generate a wide range of images from multi-modal prompts. It is designed to be simple, flexible, and easy to use
Installers are here : https://www.patreon.com/posts/omnigen-1-click-115233922
Look attached images to understand what capabilities it has. It is simply amazing so many features.
What is OmniGen : https://github.com/VectorSpaceLab/OmniGen
Windows Requirements
Python 3.10.11, CUDA 12.4, Git, FFMPEG, cuDNN 9.x, C++ Tools
A tutorial that shows how to install all above : https://youtu.be/DrhUHnYfwC0
How To Install & Use
After installing requirements by following above tutorial, double-click Windows_Install.bat and install
After that use Windows_Start.bat to start the app
When offload_model is enabled (checked) on the Gradio interface, it uses 5.4 GB VRAM, 2x slower
When offload_model is not used (not checked) it uses 12.2 GB VRAM
When separate_cfg_infer is not checked, and offload_model is not checked, it uses 18.7 GB VRAM
To install on RunPod and Massed Compute please follow Massed_Compute_Instructions_READ.txt and Runpod_Instructions_READ.txt
Look at the examples on the Gradio interface closely to understand how to use | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 376,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/9jJqr0eQ__GVOAVi6RK-v.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ObPa3ED-koiopiILWpCEL.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/bqfftCZBIM1oPiuZAJizx.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ECkZO8sSE7xkMItnEwMKP.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/9xNfZcE64otFihHNYcIia.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/wXc7MB8y_1NoO3Eyi9Nul.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/DUeqB5h292pOsPZLXDYPV.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/DE4guYhS5R_sIqXeRWB2N.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/9o08lMKsIUL_hHpMv5dd8.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/bchgqPK4FuDGtChyRO-km.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/R19gM1TBiUXx25TaEPE6P.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/a6fJyqKWzktM7XItQi6YS.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ypb4Gpon3fPC9h6hyizIw.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/TyvnlVHiHfquSt-Ocnxov.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/TpdHw9jlDnr7Oda4qocka.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/j_of6o5GVgtbI99D1qyUF.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/_mEcBVoCzFeu-5BKK22yY.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/VPAbn4venQ3CzTz8pW0m_.jpeg"
}
] | [] | [
{
"reaction": "🤯",
"users": [
"MonsterMMORPG",
"CYGDEN",
"salemseidmohamed",
"Sethblocks",
"clem"
],
"count": 5
},
{
"reaction": "👀",
"users": [
"MonsterMMORPG",
"John6666",
"CYGDEN",
"clem"
],
"count": 4
},
{
"reaction": "❤️",
"users": [
"MonsterMMORPG",
"CYGDEN",
"Vitorvolk",
"clem"
],
"count": 4
},
{
"reaction": "🔥",
"users": [
"MonsterMMORPG",
"CYGDEN"
],
"count": 2
},
{
"reaction": "🤗",
"users": [
"MonsterMMORPG",
"CYGDEN"
],
"count": 2
},
{
"reaction": "😎",
"users": [
"MonsterMMORPG",
"CYGDEN"
],
"count": 2
},
{
"reaction": "➕",
"users": [
"MonsterMMORPG",
"CYGDEN"
],
"count": 2
},
{
"reaction": "🧠",
"users": [
"MonsterMMORPG",
"CYGDEN"
],
"count": 2
},
{
"reaction": "👍",
"users": [
"MonsterMMORPG",
"CYGDEN"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🤝",
"users": [
"MonsterMMORPG"
],
"count": 1
}
] | 2024-11-03T00:48:46.000Z | 2024-11-03T00:48:46.811Z | [] | /posts/MonsterMMORPG/624245127298035 | 2,516 | 0 |
982010938778742 | [
{
"type": "text",
"value": "Smol models ftw! AMD released AMD OLMo 1B - beats OpenELM, tiny llama on MT Bench, Alpaca Eval - Apache 2.0 licensed 🔥",
"raw": "Smol models ftw! AMD released AMD OLMo 1B - beats OpenELM, tiny llama on MT Bench, Alpaca Eval - Apache 2.0 licensed 🔥",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Trained with 1.3 trillion (dolma 1.7) tokens on 16 nodes, each with 4 MI250 GPUs",
"raw": "> Trained with 1.3 trillion (dolma 1.7) tokens on 16 nodes, each with 4 MI250 GPUs",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Three checkpoints:",
"raw": "> Three checkpoints:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- AMD OLMo 1B: Pre-trained model",
"raw": "- AMD OLMo 1B: Pre-trained model",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- AMD OLMo 1B SFT: Supervised fine-tuned on Tulu V2, OpenHermes-2.5, WebInstructSub, and Code-Feedback datasets",
"raw": "- AMD OLMo 1B SFT: Supervised fine-tuned on Tulu V2, OpenHermes-2.5, WebInstructSub, and Code-Feedback datasets",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- AMD OLMo 1B SFT DPO: Aligned with human preferences using Direct Preference Optimization (DPO) on UltraFeedback dataset",
"raw": "- AMD OLMo 1B SFT DPO: Aligned with human preferences using Direct Preference Optimization (DPO) on UltraFeedback dataset",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Key Insights: ",
"raw": "Key Insights: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Pre-trained with less than half the tokens of OLMo-1B",
"raw": "> Pre-trained with less than half the tokens of OLMo-1B",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Post-training steps include two-phase SFT and DPO alignment",
"raw": "> Post-training steps include two-phase SFT and DPO alignment",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Data for SFT:",
"raw": "> Data for SFT:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Phase 1: Tulu V2",
"raw": "- Phase 1: Tulu V2",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Phase 2: OpenHermes-2.5, WebInstructSub, and Code-Feedback",
"raw": "- Phase 2: OpenHermes-2.5, WebInstructSub, and Code-Feedback",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Model checkpoints on the Hub & Integrated with Transformers ⚡️",
"raw": "> Model checkpoints on the Hub & Integrated with Transformers ⚡️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Congratulations & kudos to AMD on a brilliant smol model release! 🤗",
"raw": "Congratulations & kudos to AMD on a brilliant smol model release! 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/amd/amd-olmo-6723e7d04a49116d8ec95070",
"href": null,
"resource": {
"type": "collection",
"id": "amd/amd-olmo-6723e7d04a49116d8ec95070",
"discussionNum": null
},
"url": "https://huggingface.co/collections/amd/amd-olmo-6723e7d04a49116d8ec95070",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Smol models ftw! AMD released AMD OLMo 1B - beats OpenELM, tiny llama on MT Bench, Alpaca Eval - Apache 2.0 licensed 🔥
> Trained with 1.3 trillion (dolma 1.7) tokens on 16 nodes, each with 4 MI250 GPUs
> Three checkpoints:
- AMD OLMo 1B: Pre-trained model
- AMD OLMo 1B SFT: Supervised fine-tuned on Tulu V2, OpenHermes-2.5, WebInstructSub, and Code-Feedback datasets
- AMD OLMo 1B SFT DPO: Aligned with human preferences using Direct Preference Optimization (DPO) on UltraFeedback dataset
Key Insights:
> Pre-trained with less than half the tokens of OLMo-1B
> Post-training steps include two-phase SFT and DPO alignment
> Data for SFT:
- Phase 1: Tulu V2
- Phase 2: OpenHermes-2.5, WebInstructSub, and Code-Feedback
> Model checkpoints on the Hub & Integrated with Transformers ⚡️
Congratulations & kudos to AMD on a brilliant smol model release! 🤗
https://huggingface.co/collections/amd/amd-olmo-6723e7d04a49116d8ec95070 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg",
"fullname": "Vaibhav Srivastav",
"name": "reach-vb",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 460,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/61b85ce86eb1f2c5e6233736/ElAtLjRyGDjUarACUyqlP.jpeg"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"AtAndDev",
"John6666",
"CYGDEN",
"louisbrulenaudet",
"kimleang123",
"Dolfini",
"Joseph717171",
"not-lain",
"KvrParaskevi"
],
"count": 9
},
{
"reaction": "🔥",
"users": [
"AtAndDev",
"CYGDEN",
"Joseph717171",
"rizky-gumelar",
"not-lain"
],
"count": 5
}
] | 2024-11-02T17:40:04.000Z | 2024-11-02T17:40:19.088Z | [] | /posts/reach-vb/982010938778742 | 2,958 | 0 |
888482747169050 | [
{
"type": "text",
"value": "🚀🎭🌟 New Research Alert! 🌟🎭 🚀",
"raw": "🚀🎭🌟 New Research Alert! 🌟🎭 🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: VLOGGER: Multimodal Diffusion for Embodied Avatar Synthesis 🌟🚀",
"raw": "📄 Title: VLOGGER: Multimodal Diffusion for Embodied Avatar Synthesis 🌟🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: VLOGGER is a method for text- and audio-driven generation of talking human video from a single input image of a person, building on the success of recent generative diffusion models.",
"raw": "📝 Description: VLOGGER is a method for text- and audio-driven generation of talking human video from a single input image of a person, building on the success of recent generative diffusion models.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: ",
"raw": "👥 Authors: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@enriccorona",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "enriccorona",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Andreiz",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Andreiz",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@kolotouros",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "kolotouros",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@thiemoall",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "thiemoall",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", and et al.",
"raw": ", and et al.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Paper: ",
"raw": "🔗 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.08764",
"href": null,
"resource": {
"type": "paper",
"id": "2403.08764",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.08764",
"code": null,
"user": null,
"label": "VLOGGER: Multimodal Diffusion for Embodied Avatar Synthesis (2403.08764)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌐 Github Page: ",
"raw": "🌐 Github Page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://enriccorona.github.io/vlogger/",
"href": "https://enriccorona.github.io/vlogger/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"href": null,
"resource": {
"type": "space",
"id": "DmitryRyumin/NewEraAI-Papers",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " curated by ",
"raw": " curated by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DmitryRyumin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DmitryRyumin",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Added to the Avatars Collection: ",
"raw": "🚀 Added to the Avatars Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"href": null,
"resource": {
"type": "collection",
"id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"discussionNum": null
},
"url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #VLOGGER #EmbodiedAvatarSynthesis #MultimodalDiffusion #GenerativeDiffusionModels #DeepLearning #Animation #Innovation",
"raw": "🔍 Keywords: #VLOGGER #EmbodiedAvatarSynthesis #MultimodalDiffusion #GenerativeDiffusionModels #DeepLearning #Animation #Innovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀🎭🌟 New Research Alert! 🌟🎭 🚀
📄 Title: VLOGGER: Multimodal Diffusion for Embodied Avatar Synthesis 🌟🚀
📝 Description: VLOGGER is a method for text- and audio-driven generation of talking human video from a single input image of a person, building on the success of recent generative diffusion models.
👥 Authors: @enriccorona, @Andreiz, @kolotouros, @thiemoall, and et al.
🔗 Paper: https://huggingface.co/papers/2403.08764
🌐 Github Page: https://enriccorona.github.io/vlogger/
📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin
🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36
🔍 Keywords: #VLOGGER #EmbodiedAvatarSynthesis #MultimodalDiffusion #GenerativeDiffusionModels #DeepLearning #Animation #Innovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/jnORLUwbpI15kWD6U3VoD.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/Yy3rcMUIdLoYRBMqjb9sr.gif"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/znpII5O5z9X021ixzhqiM.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/Soi-6MblYuW8uJ0D0bTEq.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/C1XY6PHNFduWp_diAD_wD.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/piWdGS_Xlz31ZSBLmmDPx.png"
}
] | [
{
"avatarUrl": "/avatars/c8bd4b51155cd37fb7e76c97b843d461.svg",
"fullname": "Andrei Zanfir",
"name": "Andreiz",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377
},
{
"avatarUrl": "/avatars/25c96e0ab25eeb183ac278b2f8ff6262.svg",
"fullname": "Enric Corona",
"name": "enriccorona",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
},
{
"avatarUrl": "/avatars/53163398f1a629b9838548b808af53f7.svg",
"fullname": "Nikos Kolotouros",
"name": "kolotouros",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648c964d39d2584ee47af19c/5UEkzDTMY8I3svKjR7kxN.jpeg",
"fullname": "Thiemo Alldieck",
"name": "thiemoall",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1
}
] | [
{
"reaction": "🔥",
"users": [
"DmitryRyumin",
"merve",
"fffiloni",
"whappolow",
"lunarflu",
"osanseviero",
"Nicolas55",
"AdinaY",
"mychen76",
"EddyGiusepe",
"Ivan18",
"beepboopbeep",
"zarazi",
"VanshRana12"
],
"count": 14
},
{
"reaction": "❤️",
"users": [
"merve",
"lunarflu",
"samusenps",
"God-of-ai",
"clefourrier"
],
"count": 5
},
{
"reaction": "👍",
"users": [
"rmandrad",
"lunarflu",
"Nicolas55",
"Ivan18"
],
"count": 4
}
] | 2024-03-14T12:08:20.000Z | 2024-03-14T12:18:17.694Z | [] | /posts/DmitryRyumin/888482747169050 | 517 | 0 |
260733259549602 | [
{
"type": "text",
"value": "🚀 Hello HF Posts World!",
"raw": "🚀 Hello HF Posts World!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I'm excited to share in my first HF post that we at Neuraptic AI have released MAGNUM, the first open-source AI model designed to natively support any structured and unstructured data modality. ",
"raw": "I'm excited to share in my first HF post that we at Neuraptic AI have released MAGNUM, the first open-source AI model designed to natively support any structured and unstructured data modality. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "MAGNUM can learn a holistic representation of your business logic from any source of digital information—be it images, documents, emails, databases, audio, signals, and more. This rich context empowers it to deliver significantly more accurate answers.",
"raw": "MAGNUM can learn a holistic representation of your business logic from any source of digital information—be it images, documents, emails, databases, audio, signals, and more. This rich context empowers it to deliver significantly more accurate answers.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If you want to know more about it, feel free to ask or read the paper here 🤗",
"raw": "If you want to know more about it, feel free to ask or read the paper here 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.04866",
"href": null,
"resource": {
"type": "paper",
"id": "2403.04866",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.04866",
"code": null,
"user": null,
"label": "A Modular End-to-End Multimodal Learning Method for Structured and\n Unstructured Data (2403.04866)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Have a nice week!",
"raw": "Have a nice week!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀 Hello HF Posts World!
I'm excited to share in my first HF post that we at Neuraptic AI have released MAGNUM, the first open-source AI model designed to natively support any structured and unstructured data modality.
MAGNUM can learn a holistic representation of your business logic from any source of digital information—be it images, documents, emails, databases, audio, signals, and more. This rich context empowers it to deliver significantly more accurate answers.
If you want to know more about it, feel free to ask or read the paper here 🤗
https://huggingface.co/papers/2403.04866
Have a nice week! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60db433e432bcf121f559073/Y3R-fUJWOVxhX3qV8bER7.jpeg",
"fullname": "Enrique Hernández Calabrés",
"name": "ehcalabres",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60db433e432bcf121f559073/hg0RfkX1wBFER2s4wa-J2.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"Madhanraj",
"merve",
"lunarflu",
"AdinaY",
"danielus",
"clem",
"DataMorpheus"
],
"count": 9
},
{
"reaction": "🧠",
"users": [
"dioarafi",
"Lewdiculous",
"clem"
],
"count": 3
},
{
"reaction": "🤯",
"users": [
"merve",
"clem"
],
"count": 2
},
{
"reaction": "👍",
"users": [
"fffiloni",
"clem"
],
"count": 2
}
] | 2024-03-14T08:36:31.000Z | 2024-03-14T08:37:59.892Z | [] | /posts/ehcalabres/260733259549602 | 647 | 0 |
689112760209807 | [
{
"type": "text",
"value": "Hello HF! ",
"raw": "Hello HF! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Recently, I authored an article on setting up multiple GPUs in a desktop PC. I discovered a bug during the process and have documented my observations in this article:",
"raw": "Recently, I authored an article on setting up multiple GPUs in a desktop PC. I discovered a bug during the process and have documented my observations in this article:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://morgangiraud.medium.com/multi-gpu-nvidia-p2p-capabilities-and-debugging-tips-fb7597b4e2b5",
"href": "https://morgangiraud.medium.com/multi-gpu-nvidia-p2p-capabilities-and-debugging-tips-fb7597b4e2b5",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It's not directly related to ML, but maybe this will save someone some time! :)",
"raw": "It's not directly related to ML, but maybe this will save someone some time! :)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hello HF!
Recently, I authored an article on setting up multiple GPUs in a desktop PC. I discovered a bug during the process and have documented my observations in this article:
https://morgangiraud.medium.com/multi-gpu-nvidia-p2p-capabilities-and-debugging-tips-fb7597b4e2b5
It's not directly related to ML, but maybe this will save someone some time! :) | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6305cbe4df993a789e60e9dd/FgkGN9ALN8mkNZr_G0EJi.jpeg",
"fullname": "Morgan",
"name": "morgangiraud",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"julien-c",
"osanseviero",
"victor"
],
"count": 4
},
{
"reaction": "🧠",
"users": [
"merve",
"fffiloni"
],
"count": 2
}
] | 2024-03-14T08:16:46.000Z | 2024-03-14T08:16:46.321Z | [] | /posts/morgangiraud/689112760209807 | 365 | 0 |
579935878616000 | [
{
"type": "text",
"value": "Prompt Engineering: Playing A Game of Chance With LLMs.",
"raw": "Prompt Engineering: Playing A Game of Chance With LLMs.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It's obvious these days, that trying to get the best out of LLMs resembles playing a game of chance, where the choice of prompts acts as your moves in shaping the model's responses, as you recursively seek the best.",
"raw": "It's obvious these days, that trying to get the best out of LLMs resembles playing a game of chance, where the choice of prompts acts as your moves in shaping the model's responses, as you recursively seek the best.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Each prompt you craft carries the potential to lead the LLM down different paths, influencing the quality and relevance of its outputs. By experimenting with various prompts and observing how the model responds, you can uncover new insights into the inner workings of these complex systems and push the boundaries of what they can achieve.",
"raw": "Each prompt you craft carries the potential to lead the LLM down different paths, influencing the quality and relevance of its outputs. By experimenting with various prompts and observing how the model responds, you can uncover new insights into the inner workings of these complex systems and push the boundaries of what they can achieve.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Not long ago, this craftsmanship has been termed \"Prompt Engineering\", it's a job now. To better understand the \"Engineering\" of it, let's go through the paper by Google's Brain Team that shed light on it: Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.",
"raw": "Not long ago, this craftsmanship has been termed \"Prompt Engineering\", it's a job now. To better understand the \"Engineering\" of it, let's go through the paper by Google's Brain Team that shed light on it: Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The paper starts off with a clear definition of Chain-of-Thought — a coherent series of intermediate natural language reasoning steps that lead to the final answer for a problem.",
"raw": "The paper starts off with a clear definition of Chain-of-Thought — a coherent series of intermediate natural language reasoning steps that lead to the final answer for a problem.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The researchers explored how generating a series of intermediate reasoning steps significantly improves the ability of large language models to perform complex reasoning. They found that such reasoning abilities \"emerge naturally\" in sufficiently large language models via a simple method called chain of thought prompting, where a few chain of thought demonstrations are provided as exemplars in prompting.",
"raw": "The researchers explored how generating a series of intermediate reasoning steps significantly improves the ability of large language models to perform complex reasoning. They found that such reasoning abilities \"emerge naturally\" in sufficiently large language models via a simple method called chain of thought prompting, where a few chain of thought demonstrations are provided as exemplars in prompting.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Experiments on three large language models showed that chain of thought prompting improves performance on a range of arithmetic, commonsense, and symbolic reasoning tasks. For instance, prompting a 540B-parameter language model with just eight chain of thought exemplars achieves state of the art accuracy on the GSM8K benchmark of math word problems, surpassing even finetuned GPT-3 with a verifier.",
"raw": "Experiments on three large language models showed that chain of thought prompting improves performance on a range of arithmetic, commonsense, and symbolic reasoning tasks. For instance, prompting a 540B-parameter language model with just eight chain of thought exemplars achieves state of the art accuracy on the GSM8K benchmark of math word problems, surpassing even finetuned GPT-3 with a verifier.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "ReadMore: ",
"raw": "ReadMore: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://x.com/jaykef_/status/1767173517345485232?s=46&t=V2mWOpm9AdMX0spmmr0yNQ",
"href": "https://x.com/jaykef_/status/1767173517345485232?s=46&t=V2mWOpm9AdMX0spmmr0yNQ",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Prompt Engineering: Playing A Game of Chance With LLMs.
It's obvious these days, that trying to get the best out of LLMs resembles playing a game of chance, where the choice of prompts acts as your moves in shaping the model's responses, as you recursively seek the best.
Each prompt you craft carries the potential to lead the LLM down different paths, influencing the quality and relevance of its outputs. By experimenting with various prompts and observing how the model responds, you can uncover new insights into the inner workings of these complex systems and push the boundaries of what they can achieve.
Not long ago, this craftsmanship has been termed "Prompt Engineering", it's a job now. To better understand the "Engineering" of it, let's go through the paper by Google's Brain Team that shed light on it: Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.
The paper starts off with a clear definition of Chain-of-Thought — a coherent series of intermediate natural language reasoning steps that lead to the final answer for a problem.
The researchers explored how generating a series of intermediate reasoning steps significantly improves the ability of large language models to perform complex reasoning. They found that such reasoning abilities "emerge naturally" in sufficiently large language models via a simple method called chain of thought prompting, where a few chain of thought demonstrations are provided as exemplars in prompting.
Experiments on three large language models showed that chain of thought prompting improves performance on a range of arithmetic, commonsense, and symbolic reasoning tasks. For instance, prompting a 540B-parameter language model with just eight chain of thought exemplars achieves state of the art accuracy on the GSM8K benchmark of math word problems, surpassing even finetuned GPT-3 with a verifier.
ReadMore: https://x.com/jaykef_/status/1767173517345485232?s=46&t=V2mWOpm9AdMX0spmmr0yNQ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/Ej2ljwBtD5JFZ1vHSKN0N.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/wsFuFTKM5NdUlk19N4Fuo.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/6BB58gWiQ192lNkriu4GR.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/QxW_4BVZl4XuOtbOHCaui.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/SxQQnXhjnzG9UOlDsB6S_.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/GBHF6ENMvu23Ho_ymdbPO.png"
}
] | [] | [
{
"reaction": "🤝",
"users": [
"victor",
"osanseviero"
],
"count": 2
},
{
"reaction": "👀",
"users": [
"merve"
],
"count": 1
}
] | 2024-03-14T08:16:11.000Z | 2024-03-14T08:16:11.384Z | [] | /posts/Jaward/579935878616000 | 355 | 0 |
623916206209217 | [
{
"type": "text",
"value": "New moondream update out with significantly improved OCR performance (among other benchmarks)! ",
"raw": "New moondream update out with significantly improved OCR performance (among other benchmarks)! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/vikhyatk/moondream2",
"href": null,
"resource": {
"type": "model",
"id": "vikhyatk/moondream2",
"discussionNum": null
},
"url": "https://huggingface.co/vikhyatk/moondream2",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | New moondream update out with significantly improved OCR performance (among other benchmarks)!
https://huggingface.co/vikhyatk/moondream2 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg",
"fullname": "Vik Korrapati",
"name": "vikhyatk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 375,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63117568fa95534e218da163/p0lGjA6tpzbcpt2SCLFoS.jpeg"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"merve",
"fffiloni",
"Csplk",
"Sylvestre",
"Tom-Neverwinter",
"not-lain",
"cnmoro",
"avinash02",
"damerajee",
"radames"
],
"count": 10
},
{
"reaction": "🔥",
"users": [
"Sylvestre",
"not-lain",
"radames",
"Tonic"
],
"count": 4
},
{
"reaction": "🤯",
"users": [
"osanseviero",
"minhdang",
"not-lain"
],
"count": 3
},
{
"reaction": "😎",
"users": [
"osanseviero",
"not-lain",
"Tonic"
],
"count": 3
}
] | 2024-03-14T03:20:48.000Z | 2024-03-15T06:06:48.343Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5589,
"isFollowing": false
},
{
"avatarUrl": "/avatars/a58f768021c6d3a8c116076da5141f9b.svg",
"fullname": "Nate Nethercott",
"name": "nnethercott",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg",
"fullname": "Vik Korrapati",
"name": "vikhyatk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 375,
"isFollowing": false
}
] | /posts/vikhyatk/623916206209217 | 698 | 5 |
265522208136657 | [
{
"type": "text",
"value": "Synth^2 is a new approach that leverages large language models and text-to-image generators to create synthetic image-caption data for boosting visual-language model performance.",
"raw": "Synth^2 is a new approach that leverages large language models and text-to-image generators to create synthetic image-caption data for boosting visual-language model performance.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Key Points:",
"raw": "Key Points:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Overcomes data limitations by generating high-quality synthetic image-caption pairs, reducing reliance on costly human annotations.",
"raw": "* Overcomes data limitations by generating high-quality synthetic image-caption pairs, reducing reliance on costly human annotations.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Achieves competitive results on image captioning tasks using 40x less paired data than state-of-the-art methods.",
"raw": "* Achieves competitive results on image captioning tasks using 40x less paired data than state-of-the-art methods.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.07750",
"href": null,
"resource": {
"type": "paper",
"id": "2403.07750",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.07750",
"code": null,
"user": null,
"label": "Synth$^2$: Boosting Visual-Language Models with Synthetic Captions and\n Image Embeddings (2403.07750)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Congrats to the authors for their work!",
"raw": "Congrats to the authors for their work!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Synth^2 is a new approach that leverages large language models and text-to-image generators to create synthetic image-caption data for boosting visual-language model performance.
Key Points:
* Overcomes data limitations by generating high-quality synthetic image-caption pairs, reducing reliance on costly human annotations.
* Achieves competitive results on image captioning tasks using 40x less paired data than state-of-the-art methods.
Paper: https://huggingface.co/papers/2403.07750
Congrats to the authors for their work! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg",
"fullname": "Vlad Bogolin",
"name": "vladbogo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 109,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/nof0VbzcnUyc6hpM1fmJC.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/-d5d71iiOQCkT25JwD11q.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"Zmu"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"CocoSun",
"Wauplin"
],
"count": 2
}
] | 2024-03-13T21:45:04.000Z | 2024-03-13T21:45:04.475Z | [] | /posts/vladbogo/265522208136657 | 363 | 0 |
983146731735307 | [
{
"type": "text",
"value": "Wonderful open source Italian dataset from ",
"raw": "Wonderful open source Italian dataset from ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@manalog",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "manalog",
"label": null,
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@ruggsea",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "ruggsea",
"label": null,
"lang": null
},
{
"type": "text",
"value": ":",
"raw": ":",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/datasets/manalog/UsenetArchiveIT",
"href": "https://huggingface.co/datasets/manalog/UsenetArchiveIT",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The dataset contributes to the ",
"raw": "The dataset contributes to the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/mii-community",
"href": "https://huggingface.co/mii-community",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " project, aimed at advancing the creation of Italian open-source Language Models (LLMs).🇮🇹 🤖 About 10-20 billion token, probably the best conversational open source dataset in the Italian language. 🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹 ",
"raw": " project, aimed at advancing the creation of Italian open-source Language Models (LLMs).🇮🇹 🤖 About 10-20 billion token, probably the best conversational open source dataset in the Italian language. 🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Wonderful open source Italian dataset from @manalog and @ruggsea:
https://huggingface.co/datasets/manalog/UsenetArchiveIT
The dataset contributes to the https://huggingface.co/mii-community project, aimed at advancing the creation of Italian open-source Language Models (LLMs).🇮🇹 🤖 About 10-20 billion token, probably the best conversational open source dataset in the Italian language. 🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹🇮🇹 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fef4eb7770b06e11c2c6381/1NMdigjCGtn0yvQZSi5NJ.png",
"fullname": "Alessandro Ercolani",
"name": "giux78",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 44,
"isFollowing": false
} | [] | [
{
"avatarUrl": "/avatars/641876a24d4ee45dab0f9723d7b9e7f1.svg",
"fullname": "Matteo Rinaldi",
"name": "mrinaldi",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/635f0b99cf0289e13c487bee/XieL1fCJkYToy5OGPPRy_.jpeg",
"fullname": "Ruggero Marino Lazzaroni",
"name": "ruggsea",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8
}
] | [
{
"reaction": "🔥",
"users": [
"victor",
"giux78",
"ruggsea",
"samusenps",
"mrinaldi",
"osanseviero",
"gsarti",
"jackyes",
"AdinaY"
],
"count": 9
},
{
"reaction": "🤯",
"users": [
"osanseviero",
"giux78"
],
"count": 2
}
] | 2024-03-13T21:28:10.000Z | 2024-03-15T00:50:38.550Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/635f0b99cf0289e13c487bee/XieL1fCJkYToy5OGPPRy_.jpeg",
"fullname": "Ruggero Marino Lazzaroni",
"name": "ruggsea",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
}
] | /posts/giux78/983146731735307 | 333 | 2 |
171255453570637 | [
{
"type": "text",
"value": "A combined effort from the IBM + Pytorch teams achieved an incredible training performance with ZeRO/FSDP on par with 3D parallelism on H100s, while having just 800Gbps inter-node connection. ",
"raw": "A combined effort from the IBM + Pytorch teams achieved an incredible training performance with ZeRO/FSDP on par with 3D parallelism on H100s, while having just 800Gbps inter-node connection. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This is because they got an almost full overlap between comms and compute and have introduced a novel selective activation recomputation method which recalculates only large but inexpensive activations.",
"raw": "This is because they got an almost full overlap between comms and compute and have introduced a novel selective activation recomputation method which recalculates only large but inexpensive activations.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check out their post here: ",
"raw": "Check out their post here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://pytorch.org/blog/maximizing-training/",
"href": "https://pytorch.org/blog/maximizing-training/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | A combined effort from the IBM + Pytorch teams achieved an incredible training performance with ZeRO/FSDP on par with 3D parallelism on H100s, while having just 800Gbps inter-node connection.
This is because they got an almost full overlap between comms and compute and have introduced a novel selective activation recomputation method which recalculates only large but inexpensive activations.
Check out their post here: https://pytorch.org/blog/maximizing-training/ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594311341799-5f07383b19cb630495b812cd.jpeg",
"fullname": "Stas Bekman",
"name": "stas",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 97,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🤗",
"users": [
"sugatoray",
"victor",
"samusenps",
"osanseviero",
"stas",
"hunoutl",
"muhtasham"
],
"count": 7
}
] | 2024-03-13T18:04:41.000Z | 2024-03-13T18:04:41.831Z | [] | /posts/stas/171255453570637 | 734 | 0 |
307659739231169 | [
{
"type": "text",
"value": "Interesting paper: 𝐆𝐚𝐋𝐨𝐫𝐞: 𝐭𝐫𝐚𝐢𝐧 𝟕𝐁 𝐦𝐨𝐝𝐞𝐥𝐬 𝐨𝐧 𝐜𝐨𝐧𝐬𝐮𝐦𝐞𝐫-𝐠𝐫𝐚𝐝𝐞 𝐆𝐏𝐔𝐬 💪",
"raw": "Interesting paper: 𝐆𝐚𝐋𝐨𝐫𝐞: 𝐭𝐫𝐚𝐢𝐧 𝟕𝐁 𝐦𝐨𝐝𝐞𝐥𝐬 𝐨𝐧 𝐜𝐨𝐧𝐬𝐮𝐦𝐞𝐫-𝐠𝐫𝐚𝐝𝐞 𝐆𝐏𝐔𝐬 💪",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It's now possible to 𝙛𝙪𝙡𝙡𝙮 𝙥𝙧𝙚-𝙩𝙧𝙖𝙞𝙣 a 7B model on a consumer-grade GPU of 24Gb RAM, without any performance loss!",
"raw": "It's now possible to 𝙛𝙪𝙡𝙡𝙮 𝙥𝙧𝙚-𝙩𝙧𝙖𝙞𝙣 a 7B model on a consumer-grade GPU of 24Gb RAM, without any performance loss!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The memory usage of training models has always been an acute issue. For instance full pre-training of a 7B model used to eat ~50Gb of RAM!",
"raw": "The memory usage of training models has always been an acute issue. For instance full pre-training of a 7B model used to eat ~50Gb of RAM!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The common workarounds to reduce memory load are:",
"raw": "The common workarounds to reduce memory load are:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- separate models on multiple GPUs (\"sharding\")",
"raw": "- separate models on multiple GPUs (\"sharding\")",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- quantize models: encode weights on fewer bits",
"raw": "- quantize models: encode weights on fewer bits",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Another technique is to 𝙥𝙧𝙤𝙟𝙚𝙘𝙩 𝙩𝙝𝙚 𝙬𝙚𝙞𝙜𝙝𝙩 𝙢𝙖𝙩𝙧𝙞𝙭 𝙩𝙤 𝙡𝙤𝙬𝙚𝙧-𝙧𝙖𝙣𝙠 𝙨𝙥𝙖𝙘𝙚𝙨, (since sometimes the weights do not really vary on all dimensions): this can save a lot of space!",
"raw": "Another technique is to 𝙥𝙧𝙤𝙟𝙚𝙘𝙩 𝙩𝙝𝙚 𝙬𝙚𝙞𝙜𝙝𝙩 𝙢𝙖𝙩𝙧𝙞𝙭 𝙩𝙤 𝙡𝙤𝙬𝙚𝙧-𝙧𝙖𝙣𝙠 𝙨𝙥𝙖𝙘𝙚𝙨, (since sometimes the weights do not really vary on all dimensions): this can save a lot of space!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This low-rank projection can be done on adapters to preserve the original weights (go check out LoRA), but it still generally hurts the performance too much for pre-training.",
"raw": "This low-rank projection can be done on adapters to preserve the original weights (go check out LoRA), but it still generally hurts the performance too much for pre-training.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "➡️ Enter the authors of 𝘎𝘢𝘓𝘰𝘳𝘦: 𝘔𝘦𝘮𝘰𝘳𝘺-𝘌𝘧𝘧𝘪𝘤𝘪𝘦𝘯𝘵 𝘓𝘓𝘔 𝘛𝘳𝘢𝘪𝘯𝘪𝘯𝘨 𝘣𝘺 𝘎𝘳𝘢𝘥𝘪𝘦𝘯𝘵 𝘓𝘰𝘸-𝘙𝘢𝘯𝘬 𝘗𝘳𝘰𝘫𝘦𝘤𝘵𝘪𝘰𝘯. They gather (and prove) interesting insights:",
"raw": "➡️ Enter the authors of 𝘎𝘢𝘓𝘰𝘳𝘦: 𝘔𝘦𝘮𝘰𝘳𝘺-𝘌𝘧𝘧𝘪𝘤𝘪𝘦𝘯𝘵 𝘓𝘓𝘔 𝘛𝘳𝘢𝘪𝘯𝘪𝘯𝘨 𝘣𝘺 𝘎𝘳𝘢𝘥𝘪𝘦𝘯𝘵 𝘓𝘰𝘸-𝘙𝘢𝘯𝘬 𝘗𝘳𝘰𝘫𝘦𝘤𝘵𝘪𝘰𝘯. They gather (and prove) interesting insights:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "⛔ The weight matrix does not reliably converge to lower ranks during training.",
"raw": "⛔ The weight matrix does not reliably converge to lower ranks during training.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✅ But the gradient matrix does!",
"raw": "✅ But the gradient matrix does!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Based on these insights, 𝘁𝗵𝗲𝘆 𝗯𝘂𝗶𝗹𝗱 𝗚𝗮𝗟𝗼𝗿𝗲, that projects the gradient to lower ranks.",
"raw": "Based on these insights, 𝘁𝗵𝗲𝘆 𝗯𝘂𝗶𝗹𝗱 𝗚𝗮𝗟𝗼𝗿𝗲, that projects the gradient to lower ranks.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🗺️ 𝗚𝗿𝗲𝗮𝘁 𝗶𝗱𝗲𝗮: to leave the optimization free to explore more space, they periodically re-build the low-rank projection throughout the training (a nice illustration is in the paper).",
"raw": "🗺️ 𝗚𝗿𝗲𝗮𝘁 𝗶𝗱𝗲𝗮: to leave the optimization free to explore more space, they periodically re-build the low-rank projection throughout the training (a nice illustration is in the paper).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤝 This method can even be combined with previous ones such as 8-bit Adam (quantizing the optimizer states to 8-bit).",
"raw": "🤝 This method can even be combined with previous ones such as 8-bit Adam (quantizing the optimizer states to 8-bit).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "➡️ 𝐑𝐞𝐬𝐮𝐥𝐭𝐬:",
"raw": "➡️ 𝐑𝐞𝐬𝐮𝐥𝐭𝐬:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📉 Of course, huge reduction in memory footprint allowing the training on consumer-grade GPU (cf figure).",
"raw": "📉 Of course, huge reduction in memory footprint allowing the training on consumer-grade GPU (cf figure).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💪 No reduction in performance: this scales well up to 7B parameters (and was independently confirmed since) ⇒ this is essential, it confirms that the method is viable!",
"raw": "💪 No reduction in performance: this scales well up to 7B parameters (and was independently confirmed since) ⇒ this is essential, it confirms that the method is viable!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Read the full paper here: ",
"raw": "Read the full paper here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.03507",
"href": null,
"resource": {
"type": "paper",
"id": "2403.03507",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.03507",
"code": null,
"user": null,
"label": "GaLore: Memory-Efficient LLM Training by Gradient Low-Rank Projection (2403.03507)",
"lang": null
}
] | Interesting paper: 𝐆𝐚𝐋𝐨𝐫𝐞: 𝐭𝐫𝐚𝐢𝐧 𝟕𝐁 𝐦𝐨𝐝𝐞𝐥𝐬 𝐨𝐧 𝐜𝐨𝐧𝐬𝐮𝐦𝐞𝐫-𝐠𝐫𝐚𝐝𝐞 𝐆𝐏𝐔𝐬 💪
It's now possible to 𝙛𝙪𝙡𝙡𝙮 𝙥𝙧𝙚-𝙩𝙧𝙖𝙞𝙣 a 7B model on a consumer-grade GPU of 24Gb RAM, without any performance loss!
The memory usage of training models has always been an acute issue. For instance full pre-training of a 7B model used to eat ~50Gb of RAM!
The common workarounds to reduce memory load are:
- separate models on multiple GPUs ("sharding")
- quantize models: encode weights on fewer bits
Another technique is to 𝙥𝙧𝙤𝙟𝙚𝙘𝙩 𝙩𝙝𝙚 𝙬𝙚𝙞𝙜𝙝𝙩 𝙢𝙖𝙩𝙧𝙞𝙭 𝙩𝙤 𝙡𝙤𝙬𝙚𝙧-𝙧𝙖𝙣𝙠 𝙨𝙥𝙖𝙘𝙚𝙨, (since sometimes the weights do not really vary on all dimensions): this can save a lot of space!
This low-rank projection can be done on adapters to preserve the original weights (go check out LoRA), but it still generally hurts the performance too much for pre-training.
➡️ Enter the authors of 𝘎𝘢𝘓𝘰𝘳𝘦: 𝘔𝘦𝘮𝘰𝘳𝘺-𝘌𝘧𝘧𝘪𝘤𝘪𝘦𝘯𝘵 𝘓𝘓𝘔 𝘛𝘳𝘢𝘪𝘯𝘪𝘯𝘨 𝘣𝘺 𝘎𝘳𝘢𝘥𝘪𝘦𝘯𝘵 𝘓𝘰𝘸-𝘙𝘢𝘯𝘬 𝘗𝘳𝘰𝘫𝘦𝘤𝘵𝘪𝘰𝘯. They gather (and prove) interesting insights:
⛔ The weight matrix does not reliably converge to lower ranks during training.
✅ But the gradient matrix does!
Based on these insights, 𝘁𝗵𝗲𝘆 𝗯𝘂𝗶𝗹𝗱 𝗚𝗮𝗟𝗼𝗿𝗲, that projects the gradient to lower ranks.
🗺️ 𝗚𝗿𝗲𝗮𝘁 𝗶𝗱𝗲𝗮: to leave the optimization free to explore more space, they periodically re-build the low-rank projection throughout the training (a nice illustration is in the paper).
🤝 This method can even be combined with previous ones such as 8-bit Adam (quantizing the optimizer states to 8-bit).
➡️ 𝐑𝐞𝐬𝐮𝐥𝐭𝐬:
📉 Of course, huge reduction in memory footprint allowing the training on consumer-grade GPU (cf figure).
💪 No reduction in performance: this scales well up to 7B parameters (and was independently confirmed since) ⇒ this is essential, it confirms that the method is viable!
Read the full paper here: https://huggingface.co/papers/2403.03507 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 494,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/jyMHVdDKT3D6sVnTOyecD.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"victor",
"osanseviero",
"Anthonyg5005",
"Cartinoe5930",
"t1u1",
"Citaman",
"loubnabnl",
"AdinaY",
"Taylor658",
"impactframes",
"avinash02",
"florentgbelidji",
"Epiculous"
],
"count": 13
},
{
"reaction": "🚀",
"users": [
"Theli",
"Citaman",
"impactframes",
"Epiculous"
],
"count": 4
},
{
"reaction": "👀",
"users": [
"Citaman",
"impactframes"
],
"count": 2
},
{
"reaction": "👍",
"users": [
"dashfunnydashdash"
],
"count": 1
}
] | 2024-03-13T17:30:17.000Z | 2024-03-13T17:30:51.597Z | [] | /posts/m-ric/307659739231169 | 496 | 0 |
262084143367917 | [
{
"type": "text",
"value": "🔥 Community and Data Quality Are More For Alignment ",
"raw": "🔥 Community and Data Quality Are More For Alignment ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "A recipe to replicate SPIN (Self-Play Fine Tuning) with 30x less data:",
"raw": "A recipe to replicate SPIN (Self-Play Fine Tuning) with 30x less data:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🗣️ 50K samples vs 1.8K prompts curated by the 350+ amazing DIBT contributors.",
"raw": "🗣️ 50K samples vs 1.8K prompts curated by the 350+ amazing DIBT contributors.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "⚗️ Distillation of Mistral Large instead of OpenAI",
"raw": "⚗️ Distillation of Mistral Large instead of OpenAI",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🙌 Open data & code with ⚗️distilabel",
"raw": "🙌 Open data & code with ⚗️distilabel",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "SPIN Paper:",
"raw": "SPIN Paper:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2401.01335",
"href": null,
"resource": {
"type": "paper",
"id": "2401.01335",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2401.01335",
"code": null,
"user": null,
"label": "Self-Play Fine-Tuning Converts Weak Language Models to Strong Language\n Models (2401.01335)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "SPIN DIBT Collection with datasets and models:",
"raw": "SPIN DIBT Collection with datasets and models:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/argilla/dibt-prompt-collective-spin-65ef59062518776024395fc3",
"href": null,
"resource": {
"type": "collection",
"id": "argilla/dibt-prompt-collective-spin-65ef59062518776024395fc3",
"discussionNum": null
},
"url": "https://huggingface.co/collections/argilla/dibt-prompt-collective-spin-65ef59062518776024395fc3",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Repo:",
"raw": "Repo:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/argilla-io/distilabel-spin-dibt",
"href": "https://github.com/argilla-io/distilabel-spin-dibt",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Joint work with the amazing DIBT community 👇",
"raw": "Joint work with the amazing DIBT community 👇",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@aashish1904",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "aashish1904",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@flozi00",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "flozi00",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@sayhan",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "sayhan",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@munish0838",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "munish0838",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@0-hero",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "0-hero",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@dvilasuero",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "dvilasuero",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@eren23",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "eren23",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@davanstrien",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "davanstrien",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@ahnz",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "ahnz",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@BlackKakapo",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "BlackKakapo",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@kitano-o",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "kitano-o",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@mmhamdy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "mmhamdy",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@sdiazlor",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "sdiazlor",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Stopwolf",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Stopwolf",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@gabrielmbmb",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "gabrielmbmb",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@tculler91",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "tculler91",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@plaguss",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "plaguss",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@ignacioct",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "ignacioct",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Hugi-R",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Hugi-R",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@davidberenstein1957",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "davidberenstein1957",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Korla",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Korla",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@alvarobartt",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "alvarobartt",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Hugs4Llamas",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Hugs4Llamas",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Sumandora",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Sumandora",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@nataliaElv",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "nataliaElv",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@jfcalvo",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "jfcalvo",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Averill",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Averill",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@steventrouble",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "steventrouble",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@vasilis",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "vasilis",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@aeros93",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "aeros93",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@kayyshf",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "kayyshf",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@thomasgauthier",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "thomasgauthier",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@jeromebas",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "jeromebas",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Ameeeee",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Ameeeee",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@ayoubelmhamdi",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "ayoubelmhamdi",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@TuringsSolutions",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "TuringsSolutions",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@efels",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "efels",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Haleyok",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Haleyok",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@abrazador",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "abrazador",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@emessy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "emessy",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Nindaleth",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Nindaleth",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@burtenshaw",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "burtenshaw",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@vicgalle",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "vicgalle",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@CortexPE",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "CortexPE",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@casey-martin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "casey-martin",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Leire-aguirre-eguiluz",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Leire-aguirre-eguiluz",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@mrfakename",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "mrfakename",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Portias600kNeurons",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Portias600kNeurons",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@nathaliepett",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "nathaliepett",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Filippo",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Filippo",
"label": null,
"lang": null
}
] | 🔥 Community and Data Quality Are More For Alignment
A recipe to replicate SPIN (Self-Play Fine Tuning) with 30x less data:
🗣️ 50K samples vs 1.8K prompts curated by the 350+ amazing DIBT contributors.
⚗️ Distillation of Mistral Large instead of OpenAI
🙌 Open data & code with ⚗️distilabel
SPIN Paper:
https://huggingface.co/papers/2401.01335
SPIN DIBT Collection with datasets and models:
https://huggingface.co/collections/argilla/dibt-prompt-collective-spin-65ef59062518776024395fc3
Repo:
https://github.com/argilla-io/distilabel-spin-dibt
Joint work with the amazing DIBT community 👇
@aashish1904, @flozi00, @sayhan, @munish0838, @0-hero, @dvilasuero, @eren23, @davanstrien, @ahnz, @BlackKakapo, @kitano-o, @mmhamdy, @sdiazlor, @Stopwolf, @gabrielmbmb, @tculler91, @plaguss, @ignacioct, @Hugi-R, @davidberenstein1957, @Korla, @alvarobartt, @Hugs4Llamas, @Sumandora, @nataliaElv, @jfcalvo, @Averill, @steventrouble, @vasilis, @aeros93, @kayyshf, @thomasgauthier, @jeromebas, @Ameeeee, @ayoubelmhamdi, @TuringsSolutions, @efels, @Haleyok, @abrazador, @emessy, @Nindaleth, @burtenshaw, @vicgalle, @CortexPE, @casey-martin, @Leire-aguirre-eguiluz, @mrfakename, @Portias600kNeurons, @nathaliepett, @Filippo | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png",
"fullname": "Daniel Vila",
"name": "dvilasuero",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 231,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60420dccc15e823a685f2b03/cES98vU-_RsG0Agvq04VH.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6382255fcae34727b9cc149e/PYiwi8LVZParYvImmcGez.png",
"fullname": "Ram",
"name": "0-hero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 32
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64cb2c45c7f30fbf7b846c3c/6zUC-gdE53BKD_m_SvuTZ.png",
"fullname": "Aashish Kumar",
"name": "aashish1904",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 19
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1632945662592-noauth.jpeg",
"fullname": "Mario Bonilla",
"name": "abrazador",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "/avatars/760648e85f258ece7d821bb6f3dce9ff.svg",
"fullname": "Aeros",
"name": "aeros93",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
},
{
"avatarUrl": "/avatars/17ace40a24e2334d548fb6ab4f0fa406.svg",
"fullname": "Adam Holland",
"name": "ahnz",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f0608166e5701b80ed3f02/BHso-wSWpR9b8b8CKvodC.jpeg",
"fullname": "Alvaro Bartolome",
"name": "alvarobartt",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1739
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63e27f0f1f963b8f20f4a10d/n9KcVAzZDfymP9j_jpTRc.jpeg",
"fullname": "Ame Vi",
"name": "Ameeeee",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 29
},
{
"avatarUrl": "/avatars/d9a1a999ab0cdc621519347ef6807e23.svg",
"fullname": "Roy",
"name": "Averill",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
},
{
"avatarUrl": "/avatars/006f9f2bc37ac246442e8847a374a788.svg",
"fullname": "smp",
"name": "ayoubelmhamdi",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/627b8f42fbab61b048ea6ec6/_AW4GfFuPfCB41cXWvsXl.png",
"fullname": "Alexandru Petrachi",
"name": "BlackKakapo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 9
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62d648291fa3e4e7ae3fa6e8/oatOwf8Xqe5eDbCSuYqCd.png",
"fullname": "ben burtenshaw",
"name": "burtenshaw",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 67
},
{
"avatarUrl": "/avatars/b1d15fda1cabbe14d7f0b05db6dc8172.svg",
"fullname": "Casey",
"name": "casey-martin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 6
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65a6074a668b3906b85cc777/ant1RH24XAxjAM0R7PkXf.png",
"fullname": "marshall",
"name": "CortexPE",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 410
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg",
"fullname": "David Berenstein",
"name": "davidberenstein1957",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 167
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png",
"fullname": "Daniel Vila",
"name": "dvilasuero",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 231
},
{
"avatarUrl": "/avatars/f5bfe4dfccc22f88713dffe5383fdb0d.svg",
"fullname": "Daniel R Armstrong",
"name": "efels",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "/avatars/e06c65977e4469c04c1c7b2b44a5b72d.svg",
"fullname": "msmsms",
"name": "emessy",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6236052a12982296495f4674/JAnynVUkZrkzgRneTNafQ.jpeg",
"fullname": "Eren Akbulut",
"name": "eren23",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 20
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648921452640-6040119f7bfa033a4f7ca58d.jpeg",
"fullname": "Filippo B",
"name": "Filippo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/605b1cf890a4b6bc0eef99ad/LDP6-QcCGlzMi8n5IK4Y2.jpeg",
"fullname": "Florian Zimmermeister",
"name": "flozi00",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 81
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f2fc91b92afccb7c34b8ed/whF6nGtyTAhbtiWJJnL9e.png",
"fullname": "Gabriel Martín Blázquez",
"name": "gabrielmbmb",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 90
},
{
"avatarUrl": "/avatars/15df36215af38b992ebd6cd769496f9f.svg",
"fullname": "Hlop Klaus",
"name": "Haleyok",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
},
{
"avatarUrl": "/avatars/beb7dec3a705fb9010620bdaf5a97ad3.svg",
"fullname": "Hugo",
"name": "Hugi-R",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
},
{
"avatarUrl": "/avatars/960c80cdc44c81721c3db0e5c2a25eed.svg",
"fullname": "Hangou Fanou",
"name": "Hugs4Llamas",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62bb4792d31738991b21cec5/3hPWcwWmyFzDqq9OnZ7gg.jpeg",
"fullname": "Ignacio Talavera Cepeda",
"name": "ignacioct",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 15
},
{
"avatarUrl": "/avatars/ff6ec4c2c477250bb8ec0fea7f129367.svg",
"fullname": "jerome",
"name": "jeromebas",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "/avatars/4144fa94024d51d1b5e3491bf03a73e1.svg",
"fullname": "José Francisco",
"name": "jfcalvo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17
},
{
"avatarUrl": "/avatars/0916c4e69c618d0a6bb6c05703c3893f.svg",
"fullname": "Kanha",
"name": "kayyshf",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
},
{
"avatarUrl": "/avatars/9ab78181bb763debdf149638f74cee54.svg",
"fullname": "Alex",
"name": "kitano-o",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/VBMJkDYo1G0hUjIdDX-ON.jpeg",
"fullname": "Korla Baier",
"name": "Korla",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1650745211725-noauth.png",
"fullname": "Mohammed Hamdy",
"name": "mmhamdy",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 38
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png",
"fullname": "mrfakename",
"name": "mrfakename",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 969
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63fcd366ed9eead590f571a2/f8sugzrlN8x_MdIKORqBl.jpeg",
"fullname": "Munish Kumar",
"name": "munish0838",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 22
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63f7888abd28622c9b9a0b80/5t6JU_Cm7yFYTRUGr9eqH.jpeg",
"fullname": "Natalia Elvira",
"name": "nataliaElv",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 30
},
{
"avatarUrl": "/avatars/7629ff4ed696900bfa46031545da6ac1.svg",
"fullname": "Nathalie Pett",
"name": "nathaliepett",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "/avatars/67e8e2983b3187cd2425fdff1bd249fa.svg",
"fullname": "Rad Fox",
"name": "Nindaleth",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435d564a4bd75c62cc03701/7P2G_wVNB6MISp2Phh427.jpeg",
"fullname": "Agustín Piqueres Lajarín",
"name": "plaguss",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 34
},
{
"avatarUrl": "/avatars/8f8b2f7116e4638167000f9957f73553.svg",
"fullname": "Joe Bob",
"name": "Portias600kNeurons",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65aa2d4b356bf23b4a4da247/-lRjUf8LtY0NlWw9qU5Hs.jpeg",
"fullname": "Sayhan Yalvaçer",
"name": "sayhan",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 31
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6420817bf837b31c1cfced50/09dhIVj9WNgs55PdWgHGo.jpeg",
"fullname": "Sara Han Díaz",
"name": "sdiazlor",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 28
},
{
"avatarUrl": "/avatars/bc3d6bfb24833e78a47528fd6d97b5a3.svg",
"fullname": "Steven Weiss",
"name": "steventrouble",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64d9eca170891ac9b8d9fd38/hro6Ib5hFWWNBEu9J4XMe.png",
"fullname": "Sinisa Stanivuk",
"name": "Stopwolf",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 16
},
{
"avatarUrl": "/avatars/39022165b4a70ba34ea2e383ef1036a9.svg",
"fullname": "Johannes Miesenhardt",
"name": "Sumandora",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6481472a9860cd75c2614b86/7spUtk_LviyQxFWmSs5G0.jpeg",
"fullname": "Toby Culler",
"name": "tculler91",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5ee7ba4e464d0272c8b24584/gz4_rH4N088mAty1sLWca.png",
"fullname": "Thomas Gauthier-Caron",
"name": "thomasgauthier",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 14
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146
},
{
"avatarUrl": "/avatars/4890dfa08bcaa70f69dcdd8dd66002de.svg",
"fullname": "Vasilis kalogiras",
"name": "vasilis",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fad8602b8423e1d80b8a965/tRqTwcZmrGka8c1vFq2wX.jpeg",
"fullname": "Victor Gallego",
"name": "vicgalle",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 106
}
] | [
{
"reaction": "🚀",
"users": [
"davanstrien",
"nataliaElv",
"ignacioct",
"victor",
"davidberenstein1957",
"munish0838",
"0-hero",
"dvilasuero",
"julien-c",
"pacoid",
"plaguss",
"osanseviero",
"mmhamdy",
"alvarobartt",
"gabrielmbmb",
"mihaylovnikitos",
"TuringsSolutions",
"clem",
"hughte",
"BlackKakapo"
],
"count": 20
},
{
"reaction": "❤️",
"users": [
"davanstrien",
"nataliaElv",
"flozi00",
"davidberenstein1957",
"julien-c",
"pacoid",
"plaguss",
"giux78",
"osanseviero",
"mmhamdy",
"gabrielmbmb",
"BlackKakapo",
"abdullahalzubaer",
"clem"
],
"count": 14
},
{
"reaction": "🔥",
"users": [
"davanstrien",
"davidberenstein1957",
"julien-c",
"pacoid",
"osanseviero",
"mmhamdy",
"gabrielmbmb",
"BlackKakapo",
"clem",
"thomasgauthier"
],
"count": 10
},
{
"reaction": "🧠",
"users": [
"davidberenstein1957",
"julien-c",
"pacoid",
"osanseviero",
"mmhamdy",
"BlackKakapo",
"clem"
],
"count": 7
}
] | 2024-03-13T16:41:24.000Z | 2024-03-14T06:52:24.501Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 410,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6382255fcae34727b9cc149e/PYiwi8LVZParYvImmcGez.png",
"fullname": "Ram",
"name": "0-hero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 32,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317aade83d8d2fd903192d9/erOwgMXc_CZih3uMoyTAp.jpeg",
"fullname": "Teknium",
"name": "teknium",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4267,
"isFollowing": false
}
] | /posts/dvilasuero/262084143367917 | 879 | 3 |
557776533938249 | [
{
"type": "text",
"value": "MoAI",
"raw": "MoAI",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Mixture of All Intelligence for Large Language and Vision Models",
"raw": "Mixture of All Intelligence for Large Language and Vision Models",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.07508",
"href": null,
"resource": {
"type": "paper",
"id": "2403.07508",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.07508",
"code": null,
"user": null,
"label": "MoAI: Mixture of All Intelligence for Large Language and Vision Models (2403.07508)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The rise of large language models (LLMs) and instruction tuning has led to the current trend of instruction-tuned large language and vision models (LLVMs). This trend involves either meticulously curating numerous instruction tuning datasets tailored to specific objectives or enlarging LLVMs to manage vast amounts of vision language (VL) data. However, current LLVMs have disregarded the detailed and comprehensive real-world scene understanding available from specialized computer vision (CV) models in visual perception tasks such as segmentation, detection, scene graph generation (SGG), and optical character recognition (OCR). Instead, the existing LLVMs rely mainly on the large capacity and emergent capabilities of their LLM backbones. Therefore, we present a new LLVM, Mixture of All Intelligence (MoAI), which leverages auxiliary visual information obtained from the outputs of external segmentation, detection, SGG, and OCR models. MoAI operates through two newly introduced modules: MoAI-Compressor and MoAI-Mixer. After verbalizing the outputs of the external CV models, the MoAI-Compressor aligns and condenses them to efficiently use relevant auxiliary visual information for VL tasks. MoAI-Mixer then blends three types of intelligence (1) visual features, (2) auxiliary features from the external CV models, and (3) language features by utilizing the concept of Mixture of Experts. Through this integration, MoAI significantly outperforms both open-source and closed-source LLVMs in numerous zero-shot VL tasks, particularly those related to real-world scene understanding such as object existence, positions, relations, and OCR without enlarging the model size or curating extra visual instruction tuning datasets.",
"raw": "The rise of large language models (LLMs) and instruction tuning has led to the current trend of instruction-tuned large language and vision models (LLVMs). This trend involves either meticulously curating numerous instruction tuning datasets tailored to specific objectives or enlarging LLVMs to manage vast amounts of vision language (VL) data. However, current LLVMs have disregarded the detailed and comprehensive real-world scene understanding available from specialized computer vision (CV) models in visual perception tasks such as segmentation, detection, scene graph generation (SGG), and optical character recognition (OCR). Instead, the existing LLVMs rely mainly on the large capacity and emergent capabilities of their LLM backbones. Therefore, we present a new LLVM, Mixture of All Intelligence (MoAI), which leverages auxiliary visual information obtained from the outputs of external segmentation, detection, SGG, and OCR models. MoAI operates through two newly introduced modules: MoAI-Compressor and MoAI-Mixer. After verbalizing the outputs of the external CV models, the MoAI-Compressor aligns and condenses them to efficiently use relevant auxiliary visual information for VL tasks. MoAI-Mixer then blends three types of intelligence (1) visual features, (2) auxiliary features from the external CV models, and (3) language features by utilizing the concept of Mixture of Experts. Through this integration, MoAI significantly outperforms both open-source and closed-source LLVMs in numerous zero-shot VL tasks, particularly those related to real-world scene understanding such as object existence, positions, relations, and OCR without enlarging the model size or curating extra visual instruction tuning datasets.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | MoAI
Mixture of All Intelligence for Large Language and Vision Models
https://huggingface.co/papers/2403.07508
The rise of large language models (LLMs) and instruction tuning has led to the current trend of instruction-tuned large language and vision models (LLVMs). This trend involves either meticulously curating numerous instruction tuning datasets tailored to specific objectives or enlarging LLVMs to manage vast amounts of vision language (VL) data. However, current LLVMs have disregarded the detailed and comprehensive real-world scene understanding available from specialized computer vision (CV) models in visual perception tasks such as segmentation, detection, scene graph generation (SGG), and optical character recognition (OCR). Instead, the existing LLVMs rely mainly on the large capacity and emergent capabilities of their LLM backbones. Therefore, we present a new LLVM, Mixture of All Intelligence (MoAI), which leverages auxiliary visual information obtained from the outputs of external segmentation, detection, SGG, and OCR models. MoAI operates through two newly introduced modules: MoAI-Compressor and MoAI-Mixer. After verbalizing the outputs of the external CV models, the MoAI-Compressor aligns and condenses them to efficiently use relevant auxiliary visual information for VL tasks. MoAI-Mixer then blends three types of intelligence (1) visual features, (2) auxiliary features from the external CV models, and (3) language features by utilizing the concept of Mixture of Experts. Through this integration, MoAI significantly outperforms both open-source and closed-source LLVMs in numerous zero-shot VL tasks, particularly those related to real-world scene understanding such as object existence, positions, relations, and OCR without enlarging the model size or curating extra visual instruction tuning datasets. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/Y2To34FNWVUbSmZicK0Y4.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"sugatoray",
"jucamohedano",
"maywell",
"damerajee"
],
"count": 4
}
] | 2024-03-13T15:44:39.000Z | 2024-03-13T15:44:39.952Z | [] | /posts/akhaliq/557776533938249 | 471 | 0 |
306130134192217 | [
{
"type": "text",
"value": "Working on a new open-source project to showcase ",
"raw": "Working on a new open-source project to showcase ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/levihsu/OOTDiffusion",
"href": null,
"resource": {
"type": "model",
"id": "levihsu/OOTDiffusion",
"discussionNum": null
},
"url": "https://huggingface.co/levihsu/OOTDiffusion",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " 🤗 ",
"raw": " 🤗 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "But it's not a Space 🫢 Can you guess what it does? 👀 ",
"raw": "But it's not a Space 🫢 Can you guess what it does? 👀 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Working on a new open-source project to showcase https://huggingface.co/levihsu/OOTDiffusion 🤗
But it's not a Space 🫢 Can you guess what it does? 👀 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/2RK8J_YSNAK2ob8XZH7w2.jpeg",
"fullname": "Julian Bilcke",
"name": "jbilcke-hf",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1312,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64904918584563e08e84d39b/QsYeaG4tFsSaPK-LNiP-J.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"Davamax",
"damerajee"
],
"count": 2
}
] | 2024-03-13T15:40:07.000Z | 2024-03-13T15:40:07.924Z | [] | /posts/jbilcke-hf/306130134192217 | 3,910 | 0 |
286750138532677 | [
{
"type": "text",
"value": "Can we improve the quality of open LLMs for more languages?",
"raw": "Can we improve the quality of open LLMs for more languages?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Step 1: Evaluate current SOTA.",
"raw": "Step 1: Evaluate current SOTA.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The Data Is Better Together community has rated more than 10K prompts for quality. We now want to translate a subset of these to help address the language gap in model evals.",
"raw": "The Data Is Better Together community has rated more than 10K prompts for quality. We now want to translate a subset of these to help address the language gap in model evals.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The plan is roughly this:",
"raw": "The plan is roughly this:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- We started with ",
"raw": "- We started with ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/datasets/DIBT/10k_prompts_ranked",
"href": "https://huggingface.co/datasets/DIBT/10k_prompts_ranked",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " and took a subset of 500 high-quality prompts",
"raw": " and took a subset of 500 high-quality prompts",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- We're asking the community to translate these prompts into different languages",
"raw": "- We're asking the community to translate these prompts into different languages",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- We'll evaluate the extent to which we can use AlpacaEval and similar approaches to rate the outputs of models across these different languages",
"raw": "- We'll evaluate the extent to which we can use AlpacaEval and similar approaches to rate the outputs of models across these different languages",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- If it works well, we can more easily evaluate open LLMs across different languages by using a judge LLM to rate the quality of outputs from different models.",
"raw": "- If it works well, we can more easily evaluate open LLMs across different languages by using a judge LLM to rate the quality of outputs from different models.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can find more details in our new GitHub repo: ",
"raw": "You can find more details in our new GitHub repo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/data-is-better-together",
"href": "https://github.com/huggingface/data-is-better-together",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " (don't forget to give it a ⭐!) ",
"raw": " (don't forget to give it a ⭐!) ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Can we improve the quality of open LLMs for more languages?
Step 1: Evaluate current SOTA.
The Data Is Better Together community has rated more than 10K prompts for quality. We now want to translate a subset of these to help address the language gap in model evals.
The plan is roughly this:
- We started with https://huggingface.co/datasets/DIBT/10k_prompts_ranked and took a subset of 500 high-quality prompts
- We're asking the community to translate these prompts into different languages
- We'll evaluate the extent to which we can use AlpacaEval and similar approaches to rate the outputs of models across these different languages
- If it works well, we can more easily evaluate open LLMs across different languages by using a judge LLM to rate the quality of outputs from different models.
You can find more details in our new GitHub repo: https://github.com/huggingface/data-is-better-together (don't forget to give it a ⭐!) | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 410,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"dvilasuero",
"clefourrier",
"samusenps",
"sarahooker",
"Joseph717171"
],
"count": 5
},
{
"reaction": "🤗",
"users": [
"dvilasuero",
"sarahooker",
"Joseph717171",
"seyf1elislam"
],
"count": 4
},
{
"reaction": "🚀",
"users": [
"dvilasuero",
"sarahooker",
"Joseph717171"
],
"count": 3
},
{
"reaction": "🔥",
"users": [
"dvilasuero",
"sarahooker",
"Joseph717171"
],
"count": 3
},
{
"reaction": "🤝",
"users": [
"victor",
"sarahooker",
"Joseph717171"
],
"count": 3
}
] | 2024-03-13T15:03:55.000Z | 2024-03-13T15:03:55.296Z | [] | /posts/davanstrien/286750138532677 | 393 | 0 |
998774905807548 | [
{
"type": "text",
"value": "I love vision language models 💗 ",
"raw": "I love vision language models 💗 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "My favorite is KOSMOS-2, because it's a grounded model (it doesn't hallucinate). ",
"raw": "My favorite is KOSMOS-2, because it's a grounded model (it doesn't hallucinate). ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "In this demo you can,",
"raw": "In this demo you can,",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ask a question about the image, ",
"raw": "- ask a question about the image, ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- do detailed/brief captioning, ",
"raw": "- do detailed/brief captioning, ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- localize the objects! 🤯 ",
"raw": "- localize the objects! 🤯 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It's just amazing for VLM to return bounding boxes 🤩 ",
"raw": "It's just amazing for VLM to return bounding boxes 🤩 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Try it here ",
"raw": "Try it here ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/merve/kosmos2",
"href": null,
"resource": {
"type": "space",
"id": "merve/kosmos2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/merve/kosmos2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I love vision language models 💗
My favorite is KOSMOS-2, because it's a grounded model (it doesn't hallucinate).
In this demo you can,
- ask a question about the image,
- do detailed/brief captioning,
- localize the objects! 🤯
It's just amazing for VLM to return bounding boxes 🤩
Try it here https://huggingface.co/spaces/merve/kosmos2 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5589,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/tZicqE8Yw59tPTRUmtJ2q.mp4"
}
] | [] | [
{
"reaction": "🧠",
"users": [
"meryem1232",
"samusenps",
"osanseviero",
"plmsmile",
"victor",
"GO4code",
"theainerd",
"ShivjiAgnihotri"
],
"count": 8
},
{
"reaction": "👍",
"users": [
"t1u1",
"bmxtiger",
"iclalcetin",
"mathiasn1",
"Nadav314"
],
"count": 5
},
{
"reaction": "❤️",
"users": [
"alilotfi1389"
],
"count": 1
},
{
"reaction": "👀",
"users": [
"Jaward"
],
"count": 1
}
] | 2024-03-13T11:48:50.000Z | 2024-03-13T11:48:50.506Z | [] | /posts/merve/998774905807548 | 1,075 | 0 |
805387906293219 | [
{
"type": "text",
"value": "🚀🗣️🌟 New Research Alert - ICASSP 2024! 🌟🗣️🚀",
"raw": "🚀🗣️🌟 New Research Alert - ICASSP 2024! 🌟🗣️🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: AV2Wav: Diffusion-Based Re-synthesis from Continuous Self-supervised Features for Audio-Visual Speech Enhancement 🌟🚀",
"raw": "📄 Title: AV2Wav: Diffusion-Based Re-synthesis from Continuous Self-supervised Features for Audio-Visual Speech Enhancement 🌟🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: Diffused Resynthesis and HuBERT Speech Quality Enhancement.",
"raw": "📝 Description: Diffused Resynthesis and HuBERT Speech Quality Enhancement.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: Ju-Chieh Chou, Chung-Ming Chien, Karen Livescu",
"raw": "👥 Authors: Ju-Chieh Chou, Chung-Ming Chien, Karen Livescu",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📅 Conference: ICASSP, 14-19 April 2024 | Seoul, Korea 🇰🇷",
"raw": "📅 Conference: ICASSP, 14-19 April 2024 | Seoul, Korea 🇰🇷",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Paper: ",
"raw": "🔗 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2309.08030",
"href": null,
"resource": {
"type": "paper",
"id": "2309.08030",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2309.08030",
"code": null,
"user": null,
"label": "AV2Wav: Diffusion-Based Re-synthesis from Continuous Self-supervised\n Features for Audio-Visual Speech Enhancement (2309.08030)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌐 Web Page: ",
"raw": "🌐 Web Page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://home.ttic.edu/~jcchou/demo/avse/avse_demo.html",
"href": "https://home.ttic.edu/~jcchou/demo/avse/avse_demo.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "DmitryRyumin/NewEraAI-Papers curated by ",
"raw": "DmitryRyumin/NewEraAI-Papers curated by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DmitryRyumin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DmitryRyumin",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Added to the Speech Enhancement Collection: ",
"raw": "🚀 Added to the Speech Enhancement Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/DmitryRyumin/speech-enhancement-65de31e1b6d9a040c151702e",
"href": null,
"resource": {
"type": "collection",
"id": "DmitryRyumin/speech-enhancement-65de31e1b6d9a040c151702e",
"discussionNum": null
},
"url": "https://huggingface.co/collections/DmitryRyumin/speech-enhancement-65de31e1b6d9a040c151702e",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #AV2Wav #SpeechEnhancement #SpeechProcessing #AudioVisual #Diffusion #ICASSP2024 #Innovation",
"raw": "🔍 Keywords: #AV2Wav #SpeechEnhancement #SpeechProcessing #AudioVisual #Diffusion #ICASSP2024 #Innovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀🗣️🌟 New Research Alert - ICASSP 2024! 🌟🗣️🚀
📄 Title: AV2Wav: Diffusion-Based Re-synthesis from Continuous Self-supervised Features for Audio-Visual Speech Enhancement 🌟🚀
📝 Description: Diffused Resynthesis and HuBERT Speech Quality Enhancement.
👥 Authors: Ju-Chieh Chou, Chung-Ming Chien, Karen Livescu
📅 Conference: ICASSP, 14-19 April 2024 | Seoul, Korea 🇰🇷
🔗 Paper: https://huggingface.co/papers/2309.08030
🌐 Web Page: https://home.ttic.edu/~jcchou/demo/avse/avse_demo.html
📚 More Papers: more cutting-edge research presented at other conferences in the
DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin
🚀 Added to the Speech Enhancement Collection: https://huggingface.co/collections/DmitryRyumin/speech-enhancement-65de31e1b6d9a040c151702e
🔍 Keywords: #AV2Wav #SpeechEnhancement #SpeechProcessing #AudioVisual #Diffusion #ICASSP2024 #Innovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/O3eVBfp9ikMQcOM2ad1sj.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/qaFLn6Fl4eLg31YWrroLt.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/3oj1vv7nyIgJgbhOnILVg.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377
}
] | [
{
"reaction": "❤️",
"users": [
"DmitryRyumin",
"samusenps",
"taufiqdp",
"enzostvs",
"osanseviero",
"merve",
"victor",
"Shir02021"
],
"count": 8
},
{
"reaction": "😎",
"users": [
"victor",
"taufiqdp",
"merve"
],
"count": 3
},
{
"reaction": "🤯",
"users": [
"merve",
"osanseviero"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"enzostvs"
],
"count": 1
},
{
"reaction": "🧠",
"users": [
"merve"
],
"count": 1
}
] | 2024-03-13T10:08:39.000Z | 2024-03-13T10:08:39.721Z | [] | /posts/DmitryRyumin/805387906293219 | 274 | 0 |
127895284909100 | [
{
"type": "text",
"value": "Diaries of Open Source. Part 4!",
"raw": "Diaries of Open Source. Part 4!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌏Cohere and Cohere4AI release Command-R, a 35B model that is multilingual, RAG-optimized, and can manage tools!",
"raw": "🌏Cohere and Cohere4AI release Command-R, a 35B model that is multilingual, RAG-optimized, and can manage tools!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/CohereForAI/c4ai-command-r-v01",
"href": null,
"resource": {
"type": "model",
"id": "CohereForAI/c4ai-command-r-v01",
"discussionNum": null
},
"url": "https://huggingface.co/CohereForAI/c4ai-command-r-v01",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Blog post: ",
"raw": "Blog post: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://txt.cohere.com/command-r/",
"href": "https://txt.cohere.com/command-r/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🧑🍳StarChat2: A powerful code model that is conversational ",
"raw": "🧑🍳StarChat2: A powerful code model that is conversational ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Try it out: ",
"raw": "Try it out: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/HuggingFaceH4/starchat2-playground",
"href": null,
"resource": {
"type": "space",
"id": "HuggingFaceH4/starchat2-playground",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/HuggingFaceH4/starchat2-playground",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Repos: ",
"raw": "Repos: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/HuggingFaceH4/starchat2-15b-65f068417b330fafad751fce",
"href": null,
"resource": {
"type": "collection",
"id": "HuggingFaceH4/starchat2-15b-65f068417b330fafad751fce",
"discussionNum": null
},
"url": "https://huggingface.co/collections/HuggingFaceH4/starchat2-15b-65f068417b330fafad751fce",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Training code: ",
"raw": "Training code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/alignment-handbook/tree/main/recipes/starchat2-15b",
"href": "https://github.com/huggingface/alignment-handbook/tree/main/recipes/starchat2-15b",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🐲Yi-9B: trained on 3 trillion tokens, this english-chinese LLM is quite good and with a very nice detailed report!",
"raw": "🐲Yi-9B: trained on 3 trillion tokens, this english-chinese LLM is quite good and with a very nice detailed report!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/01-ai/Yi-9B",
"href": null,
"resource": {
"type": "model",
"id": "01-ai/Yi-9B",
"discussionNum": null
},
"url": "https://huggingface.co/01-ai/Yi-9B",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.04652",
"href": null,
"resource": {
"type": "paper",
"id": "2403.04652",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.04652",
"code": null,
"user": null,
"label": "Yi: Open Foundation Models by 01.AI (2403.04652)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🐋DeepSeek-VL, 1.3B and 7B VLMs",
"raw": "🐋DeepSeek-VL, 1.3B and 7B VLMs",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/papers/2403.05525",
"href": null,
"resource": {
"type": "paper",
"id": "2403.05525",
"discussionNum": null
},
"url": "https://hf.co/papers/2403.05525",
"code": null,
"user": null,
"label": "DeepSeek-VL: Towards Real-World Vision-Language Understanding (2403.05525)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Large model: ",
"raw": "Large model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/deepseek-ai/deepseek-vl-7b-chat",
"href": null,
"resource": {
"type": "model",
"id": "deepseek-ai/deepseek-vl-7b-chat",
"discussionNum": null
},
"url": "https://huggingface.co/deepseek-ai/deepseek-vl-7b-chat",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✍️Writer releases OmniACT: a dataset for multimodal agents for desktop and web.",
"raw": "✍️Writer releases OmniACT: a dataset for multimodal agents for desktop and web.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Dataset: ",
"raw": "Dataset: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/Writer/omniact",
"href": null,
"resource": {
"type": "dataset",
"id": "Writer/omniact",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/Writer/omniact",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2402.17553",
"href": null,
"resource": {
"type": "paper",
"id": "2402.17553",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2402.17553",
"code": null,
"user": null,
"label": "OmniACT: A Dataset and Benchmark for Enabling Multimodal Generalist\n Autonomous Agents for Desktop and Web (2402.17553)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🍎Apple releases MobileCLIP: fast image-text models! ",
"raw": "🍎Apple releases MobileCLIP: fast image-text models! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/apple/ml-mobileclip",
"href": "https://github.com/apple/ml-mobileclip",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🦙💪LlamaGym - fine-tune LLM agents with RL in just a few lines of code! ",
"raw": "🦙💪LlamaGym - fine-tune LLM agents with RL in just a few lines of code! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/KhoomeiK/LlamaGym",
"href": "https://github.com/KhoomeiK/LlamaGym",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " 🖼️New multimodal leaderboard ConTextual ",
"raw": " 🖼️New multimodal leaderboard ConTextual ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/leaderboard-contextual",
"href": "https://huggingface.co/blog/leaderboard-contextual",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🎁 Design2Code: benchmark for multimodal LLMs for automating front-end development. ",
"raw": "🎁 Design2Code: benchmark for multimodal LLMs for automating front-end development. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Dataset ",
"raw": "Dataset ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/SALT-NLP/Design2Code",
"href": null,
"resource": {
"type": "dataset",
"id": "SALT-NLP/Design2Code",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/SALT-NLP/Design2Code",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper ",
"raw": "Paper ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.03163",
"href": null,
"resource": {
"type": "paper",
"id": "2403.03163",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.03163",
"code": null,
"user": null,
"label": "Design2Code: How Far Are We From Automating Front-End Engineering? (2403.03163)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Project ",
"raw": "Project ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://salt-nlp.github.io/Design2Code/",
"href": "https://salt-nlp.github.io/Design2Code/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can find the previous part at ",
"raw": "You can find the previous part at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/posts/osanseviero/633758457910104",
"href": "https://huggingface.co/posts/osanseviero/633758457910104",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Diaries of Open Source. Part 4!
🌏Cohere and Cohere4AI release Command-R, a 35B model that is multilingual, RAG-optimized, and can manage tools!
Model: https://huggingface.co/CohereForAI/c4ai-command-r-v01
Blog post: https://txt.cohere.com/command-r/
🧑🍳StarChat2: A powerful code model that is conversational
Try it out: https://huggingface.co/spaces/HuggingFaceH4/starchat2-playground
Repos: https://huggingface.co/collections/HuggingFaceH4/starchat2-15b-65f068417b330fafad751fce
Training code: https://github.com/huggingface/alignment-handbook/tree/main/recipes/starchat2-15b
🐲Yi-9B: trained on 3 trillion tokens, this english-chinese LLM is quite good and with a very nice detailed report!
Model: https://huggingface.co/01-ai/Yi-9B
Paper: https://huggingface.co/papers/2403.04652
🐋DeepSeek-VL, 1.3B and 7B VLMs
Paper: https://hf.co/papers/2403.05525
Large model: https://huggingface.co/deepseek-ai/deepseek-vl-7b-chat
✍️Writer releases OmniACT: a dataset for multimodal agents for desktop and web.
Dataset: https://huggingface.co/datasets/Writer/omniact
Paper: https://huggingface.co/papers/2402.17553
🍎Apple releases MobileCLIP: fast image-text models! https://github.com/apple/ml-mobileclip
🦙💪LlamaGym - fine-tune LLM agents with RL in just a few lines of code! https://github.com/KhoomeiK/LlamaGym
🖼️New multimodal leaderboard ConTextual https://huggingface.co/blog/leaderboard-contextual
🎁 Design2Code: benchmark for multimodal LLMs for automating front-end development.
Dataset https://huggingface.co/datasets/SALT-NLP/Design2Code
Paper https://huggingface.co/papers/2403.03163
Project https://salt-nlp.github.io/Design2Code/
You can find the previous part at https://huggingface.co/posts/osanseviero/633758457910104 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"victor",
"kramp",
"DmitryRyumin",
"adamelliotfields",
"merve",
"mohammedbriman",
"giux78",
"MichaelFried",
"theainerd",
"avinash02",
"VanshRana12"
],
"count": 12
},
{
"reaction": "🔥",
"users": [
"merve",
"visheratin",
"giux78",
"theainerd"
],
"count": 4
}
] | 2024-03-13T09:09:58.000Z | 2024-03-13T09:10:10.986Z | [] | /posts/osanseviero/127895284909100 | 309 | 0 |
251337834457146 | [
{
"type": "text",
"value": "You gotta love what Aapple’s mlx team cooked:",
"raw": "You gotta love what Aapple’s mlx team cooked:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- A unified memory model that literally does compute-magic: parallel operations with automatic dependency insertions.",
"raw": "- A unified memory model that literally does compute-magic: parallel operations with automatic dependency insertions.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Supports off-the-shelf use of all the fun stuff in composable func transformations (differentiation, vectorization, computation graph optimization).",
"raw": "- Supports off-the-shelf use of all the fun stuff in composable func transformations (differentiation, vectorization, computation graph optimization).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Houses simplified forms of all the APIs we love and in the languages we adore (python, C++, C) sorry Swift :)",
"raw": "- Houses simplified forms of all the APIs we love and in the languages we adore (python, C++, C) sorry Swift :)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- mlx.nn is a stallion 🔥 simple to use.",
"raw": "- mlx.nn is a stallion 🔥 simple to use.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Open-source friendly (who would have thought lol).",
"raw": "- Open-source friendly (who would have thought lol).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Dynamic graph construction👍🏼",
"raw": "- Dynamic graph construction👍🏼",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Supports both CPU and GPU🤖",
"raw": "- Supports both CPU and GPU🤖",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Beginner Friendly 👌🏼",
"raw": "- Beginner Friendly 👌🏼",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Great examples (clean code💯)",
"raw": "- Great examples (clean code💯)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Good documentation",
"raw": "- Good documentation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Well done Awni Hannun et al 👏🏼",
"raw": "Well done Awni Hannun et al 👏🏼",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Could this be The Transformer of ml frameworks? Well at least for us mac users 😂",
"raw": "Could this be The Transformer of ml frameworks? Well at least for us mac users 😂",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Repo: ",
"raw": "Repo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/ml-explore/mlx",
"href": "https://github.com/ml-explore/mlx",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Examples: ",
"raw": "Examples: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/ml-explore/mlx-examples",
"href": "https://github.com/ml-explore/mlx-examples",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Documentation: ",
"raw": "Documentation: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://ml-explore.github.io/mlx/build/html/python/nn.html",
"href": "https://ml-explore.github.io/mlx/build/html/python/nn.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | You gotta love what Aapple’s mlx team cooked:
- A unified memory model that literally does compute-magic: parallel operations with automatic dependency insertions.
- Supports off-the-shelf use of all the fun stuff in composable func transformations (differentiation, vectorization, computation graph optimization).
- Houses simplified forms of all the APIs we love and in the languages we adore (python, C++, C) sorry Swift :)
- mlx.nn is a stallion 🔥 simple to use.
- Open-source friendly (who would have thought lol).
- Dynamic graph construction👍🏼
- Supports both CPU and GPU🤖
- Beginner Friendly 👌🏼
- Great examples (clean code💯)
- Good documentation
Well done Awni Hannun et al 👏🏼
Could this be The Transformer of ml frameworks? Well at least for us mac users 😂
Repo: https://github.com/ml-explore/mlx
Examples: https://github.com/ml-explore/mlx-examples
Documentation: https://ml-explore.github.io/mlx/build/html/python/nn.html | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/JOC7BvwEjJVRrdMJf8kqW.mp4"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"osanseviero",
"victor",
"awni",
"merve",
"Federic"
],
"count": 5
}
] | 2024-03-13T04:45:49.000Z | 2024-03-13T08:43:54.242Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
}
] | /posts/Jaward/251337834457146 | 201 | 1 |
971433138094349 | [
{
"type": "text",
"value": "🚀 Excited to unveil the Augmented ARC-Challenge Dataset with Chain-of-Thought Reasoning! 🧠✨",
"raw": "🚀 Excited to unveil the Augmented ARC-Challenge Dataset with Chain-of-Thought Reasoning! 🧠✨",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 Created by enhancing the ARC dataset with AI-generated reasoning from Google's Gemini Pro, this resource aims to improve question answering models' ability to tackle complex science queries. ",
"raw": "📚 Created by enhancing the ARC dataset with AI-generated reasoning from Google's Gemini Pro, this resource aims to improve question answering models' ability to tackle complex science queries. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Features:",
"raw": "🔍 Features:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 1068 training examples",
"raw": "- 1068 training examples",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Detailed reasoning steps for nuanced understanding",
"raw": "- Detailed reasoning steps for nuanced understanding",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Questions spanning physics, chemistry, biology, & more!",
"raw": "- Questions spanning physics, chemistry, biology, & more!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌟 Ideal for benchmarking QA models, enhancing model interpretability, and studying in-context examples.",
"raw": "🌟 Ideal for benchmarking QA models, enhancing model interpretability, and studying in-context examples.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Dive in and help your models learn the art of reasoning! ",
"raw": "🔗 Dive in and help your models learn the art of reasoning! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔎 Explore more: ",
"raw": "🔎 Explore more: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/Locutusque/arc-cot",
"href": null,
"resource": {
"type": "dataset",
"id": "Locutusque/arc-cot",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/Locutusque/arc-cot",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀 Excited to unveil the Augmented ARC-Challenge Dataset with Chain-of-Thought Reasoning! 🧠✨
📚 Created by enhancing the ARC dataset with AI-generated reasoning from Google's Gemini Pro, this resource aims to improve question answering models' ability to tackle complex science queries.
🔍 Features:
- 1068 training examples
- Detailed reasoning steps for nuanced understanding
- Questions spanning physics, chemistry, biology, & more!
🌟 Ideal for benchmarking QA models, enhancing model interpretability, and studying in-context examples.
🔗 Dive in and help your models learn the art of reasoning!
🔎 Explore more: https://huggingface.co/datasets/Locutusque/arc-cot
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/YeFyz1AZVcCRsyNHHtwJG.jpeg",
"fullname": "Sebastian Gabarain",
"name": "Locutusque",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 180,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"euclaise",
"osanseviero",
"diwank"
],
"count": 4
},
{
"reaction": "🤗",
"users": [
"ZennyKenny",
"mlabonne"
],
"count": 2
},
{
"reaction": "👍",
"users": [
"dashfunnydashdash"
],
"count": 1
}
] | 2024-03-13T02:36:06.000Z | 2024-03-13T02:36:06.826Z | [] | /posts/Locutusque/971433138094349 | 1,149 | 0 |
679501505814075 | [
{
"type": "text",
"value": "Stealing Part of a Production Language Model",
"raw": "Stealing Part of a Production Language Model",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.06634",
"href": null,
"resource": {
"type": "paper",
"id": "2403.06634",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.06634",
"code": null,
"user": null,
"label": "Stealing Part of a Production Language Model (2403.06634)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We introduce the first model-stealing attack that extracts precise, nontrivial information from black-box production language models like OpenAI's ChatGPT or Google's PaLM-2. Specifically, our attack recovers the embedding projection layer (up to symmetries) of a transformer model, given typical API access. For under \\20 USD, our attack extracts the entire projection matrix of OpenAI's Ada and Babbage language models. We thereby confirm, for the first time, that these black-box models have a hidden dimension of 1024 and 2048, respectively. We also recover the exact hidden dimension size of the gpt-3.5-turbo model, and estimate it would cost under 2,000 in queries to recover the entire projection matrix. We conclude with potential defenses and mitigations, and discuss the implications of possible future work that could extend our attack.",
"raw": "We introduce the first model-stealing attack that extracts precise, nontrivial information from black-box production language models like OpenAI's ChatGPT or Google's PaLM-2. Specifically, our attack recovers the embedding projection layer (up to symmetries) of a transformer model, given typical API access. For under \\20 USD, our attack extracts the entire projection matrix of OpenAI's Ada and Babbage language models. We thereby confirm, for the first time, that these black-box models have a hidden dimension of 1024 and 2048, respectively. We also recover the exact hidden dimension size of the gpt-3.5-turbo model, and estimate it would cost under 2,000 in queries to recover the entire projection matrix. We conclude with potential defenses and mitigations, and discuss the implications of possible future work that could extend our attack.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Stealing Part of a Production Language Model
https://huggingface.co/papers/2403.06634
We introduce the first model-stealing attack that extracts precise, nontrivial information from black-box production language models like OpenAI's ChatGPT or Google's PaLM-2. Specifically, our attack recovers the embedding projection layer (up to symmetries) of a transformer model, given typical API access. For under \20 USD, our attack extracts the entire projection matrix of OpenAI's Ada and Babbage language models. We thereby confirm, for the first time, that these black-box models have a hidden dimension of 1024 and 2048, respectively. We also recover the exact hidden dimension size of the gpt-3.5-turbo model, and estimate it would cost under 2,000 in queries to recover the entire projection matrix. We conclude with potential defenses and mitigations, and discuss the implications of possible future work that could extend our attack. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/tv3Ab9D3ev_hwAYNJgD3L.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"ajibawa-2023",
"taufiqdp",
"arnavgrg",
"adamelliotfields",
"evdcush",
"abdullahalzubaer",
"bootsbootsboots",
"philipp-zettl"
],
"count": 8
},
{
"reaction": "🤯",
"users": [
"adamelliotfields",
"CristianJD",
"diwank"
],
"count": 3
},
{
"reaction": "🧠",
"users": [
"merve"
],
"count": 1
},
{
"reaction": "🔥",
"users": [
"diwank"
],
"count": 1
}
] | 2024-03-13T02:13:19.000Z | 2024-03-14T23:11:23.206Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5589,
"isFollowing": false
}
] | /posts/akhaliq/679501505814075 | 355 | 2 |
390309349796467 | [
{
"type": "text",
"value": "Hello World! This post is written by the Large Action Model framework LaVague! Find out more on ",
"raw": "Hello World! This post is written by the Large Action Model framework LaVague! Find out more on ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/mithril-security/LaVague",
"href": "https://github.com/mithril-security/LaVague",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Edit: Here is the video of 🌊LaVague posting this. This is quite meta ",
"raw": "Edit: Here is the video of 🌊LaVague posting this. This is quite meta ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hello World! This post is written by the Large Action Model framework LaVague! Find out more on https://github.com/mithril-security/LaVague
Edit: Here is the video of 🌊LaVague posting this. This is quite meta | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1661497922734-62f4ac43567dbf9a39f75474.jpeg",
"fullname": "Daniel Huynh",
"name": "dhuynh95",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 75,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62f4ac43567dbf9a39f75474/tMMAQHh3QkuO_cq7tM5KW.mp4"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"abidlabs",
"akhaliq",
"PeepDaSlan9",
"julien-c",
"eduagarcia",
"diegotluz",
"OmRajani"
],
"count": 9
},
{
"reaction": "😎",
"users": [
"samusenps",
"abidlabs",
"akhaliq",
"julien-c",
"diegotluz"
],
"count": 5
},
{
"reaction": "👀",
"users": [
"xzuyn"
],
"count": 1
},
{
"reaction": "🤯",
"users": [
"DamarJati"
],
"count": 1
}
] | 2024-03-12T22:03:20.000Z | 2024-03-14T10:34:56.033Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1621947938344-noauth.png",
"fullname": "Abubakar Abid",
"name": "abidlabs",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 487,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg",
"fullname": "Julien Chaumond",
"name": "julien-c",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1580,
"isFollowing": false
}
] | /posts/dhuynh95/390309349796467 | 788 | 2 |
696955717140106 | [
{
"type": "text",
"value": "Can you beat an AI at Raven puzzles?",
"raw": "Can you beat an AI at Raven puzzles?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/HuggingFaceM4/ai_raven",
"href": null,
"resource": {
"type": "space",
"id": "HuggingFaceM4/ai_raven",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/HuggingFaceM4/ai_raven",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The most powerful vision+language AI systems like Gemini or GPT4V struggle with this problem when used out-of-the-box (",
"raw": "The most powerful vision+language AI systems like Gemini or GPT4V struggle with this problem when used out-of-the-box (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.04732",
"href": null,
"resource": {
"type": "paper",
"id": "2403.04732",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.04732",
"code": null,
"user": null,
"label": "How Far Are We from Intelligent Visual Deductive Reasoning? (2403.04732)",
"lang": null
},
{
"type": "text",
"value": ").",
"raw": ").",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "But when properly trained, a small ~8B model can be very accurate at these IQ tests, solely based on visual inputs!",
"raw": "But when properly trained, a small ~8B model can be very accurate at these IQ tests, solely based on visual inputs!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Raven's Progressive Matrices are visual intelligence tests invented in the 1930s designed to measure abstract reasoning and problem-solving ability. The test consists of a series of matrices or patterns with one part missing. The task for the test-taker is to identify the missing piece from a set of options.",
"raw": "Raven's Progressive Matrices are visual intelligence tests invented in the 1930s designed to measure abstract reasoning and problem-solving ability. The test consists of a series of matrices or patterns with one part missing. The task for the test-taker is to identify the missing piece from a set of options.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Such puzzles can be procedurally generated at scale. ",
"raw": "Such puzzles can be procedurally generated at scale. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/HuggingFaceM4/RAVEN",
"href": null,
"resource": {
"type": "dataset",
"id": "HuggingFaceM4/RAVEN",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/HuggingFaceM4/RAVEN",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " is one example. The complexity of the puzzles is then controlled by the complexity of the generation procedure. ",
"raw": " is one example. The complexity of the puzzles is then controlled by the complexity of the generation procedure. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We fine-tuned an early checkpoint of our upcoming vision-and-language model idefics2 on that dataset. The resulting checkpoint yields ~91% accuracy! No chain of thoughts, no pre-processing of the image, no additional inputs or metadata, just the RAVEN problem fed to the model as a standalone image (and a short instruction to the model “Which figure should complete the logical sequence?”), with the training objective being the standard cross-entropy.",
"raw": "We fine-tuned an early checkpoint of our upcoming vision-and-language model idefics2 on that dataset. The resulting checkpoint yields ~91% accuracy! No chain of thoughts, no pre-processing of the image, no additional inputs or metadata, just the RAVEN problem fed to the model as a standalone image (and a short instruction to the model “Which figure should complete the logical sequence?”), with the training objective being the standard cross-entropy.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Just another evidence that in a lot of cases, for a given well-scoped problem, you will be better off paying to collect & annotate data, and fine-tune a model on that data (i.e. build your own AI) than wastefully trying to solve that problem with a gigantic general-purpose model you call through a paid API!",
"raw": "Just another evidence that in a lot of cases, for a given well-scoped problem, you will be better off paying to collect & annotate data, and fine-tune a model on that data (i.e. build your own AI) than wastefully trying to solve that problem with a gigantic general-purpose model you call through a paid API!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Can you beat an AI at Raven puzzles?
https://huggingface.co/spaces/HuggingFaceM4/ai_raven
The most powerful vision+language AI systems like Gemini or GPT4V struggle with this problem when used out-of-the-box (https://huggingface.co/papers/2403.04732).
But when properly trained, a small ~8B model can be very accurate at these IQ tests, solely based on visual inputs!
Raven's Progressive Matrices are visual intelligence tests invented in the 1930s designed to measure abstract reasoning and problem-solving ability. The test consists of a series of matrices or patterns with one part missing. The task for the test-taker is to identify the missing piece from a set of options.
Such puzzles can be procedurally generated at scale. https://huggingface.co/datasets/HuggingFaceM4/RAVEN is one example. The complexity of the puzzles is then controlled by the complexity of the generation procedure.
We fine-tuned an early checkpoint of our upcoming vision-and-language model idefics2 on that dataset. The resulting checkpoint yields ~91% accuracy! No chain of thoughts, no pre-processing of the image, no additional inputs or metadata, just the RAVEN problem fed to the model as a standalone image (and a short instruction to the model “Which figure should complete the logical sequence?”), with the training objective being the standard cross-entropy.
Just another evidence that in a lot of cases, for a given well-scoped problem, you will be better off paying to collect & annotate data, and fine-tune a model on that data (i.e. build your own AI) than wastefully trying to solve that problem with a gigantic general-purpose model you call through a paid API! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1619623771844-5ecea265968f6028e0559fa5.jpeg",
"fullname": "Victor Sanh",
"name": "VictorSanh",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 206,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5ecea265968f6028e0559fa5/HPyPOmRzsfHK34FKBHQHO.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"clefourrier"
],
"count": 3
},
{
"reaction": "🔥",
"users": [
"yizheapple"
],
"count": 1
}
] | 2024-03-12T20:25:58.000Z | 2024-03-18T18:46:47.142Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/YurLUFRKxyg3R753mCF_V.jpeg",
"fullname": "Kelvin Bruce ",
"name": "Kelvin19",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4,
"isFollowing": false
}
] | /posts/VictorSanh/696955717140106 | 371 | 1 |
683697778080709 | [
{
"type": "text",
"value": "Can we align code generation models to be good at chat without compromising their base capabilities 🤔?",
"raw": "Can we align code generation models to be good at chat without compromising their base capabilities 🤔?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This was the question the H4 team asked itself when BigCode released StarCoder2 a bit over a week ago. We knew that code models like ",
"raw": "This was the question the H4 team asked itself when BigCode released StarCoder2 a bit over a week ago. We knew that code models like ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct",
"href": null,
"resource": {
"type": "model",
"id": "deepseek-ai/deepseek-coder-6.7b-instruct",
"discussionNum": null
},
"url": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/m-a-p/OpenCodeInterpreter-DS-33B",
"href": null,
"resource": {
"type": "model",
"id": "m-a-p/OpenCodeInterpreter-DS-33B",
"discussionNum": null
},
"url": "https://huggingface.co/m-a-p/OpenCodeInterpreter-DS-33B",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " get impressive scores on code benchmarks like HumanEval, but they tend to score poorly on chat benchmarks like MT Bench and IFEval. We also knew that the Zephyr recipe we applied to Mistral 7B produced a strong chat model, so we wondered -- could be tweaked to produce a strong coding assistant?",
"raw": " get impressive scores on code benchmarks like HumanEval, but they tend to score poorly on chat benchmarks like MT Bench and IFEval. We also knew that the Zephyr recipe we applied to Mistral 7B produced a strong chat model, so we wondered -- could be tweaked to produce a strong coding assistant?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It turns out the answer is yes and I'm happy to share StarChat2, a DPO fine-tune of StarCoder2 15B that scores highly on both HumanEval and MT Bench / IFEval 🌟!",
"raw": "It turns out the answer is yes and I'm happy to share StarChat2, a DPO fine-tune of StarCoder2 15B that scores highly on both HumanEval and MT Bench / IFEval 🌟!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The most interesting lesson for me was that you get better models by blending in more code/math data than chat during the SFT step - in terms of tokens, we found a ratio of 3:1 worked best.",
"raw": "The most interesting lesson for me was that you get better models by blending in more code/math data than chat during the SFT step - in terms of tokens, we found a ratio of 3:1 worked best.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Anyway, here's a demo of the model, along with all the code and datasets we used to train it:",
"raw": "Anyway, here's a demo of the model, along with all the code and datasets we used to train it:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Demo: ",
"raw": "* Demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/HuggingFaceH4/starchat2-playground",
"href": null,
"resource": {
"type": "space",
"id": "HuggingFaceH4/starchat2-playground",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/HuggingFaceH4/starchat2-playground",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Collection: ",
"raw": "* Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/HuggingFaceH4/starchat2-15b-65f068417b330fafad751fce",
"href": null,
"resource": {
"type": "collection",
"id": "HuggingFaceH4/starchat2-15b-65f068417b330fafad751fce",
"discussionNum": null
},
"url": "https://huggingface.co/collections/HuggingFaceH4/starchat2-15b-65f068417b330fafad751fce",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Recipe: ",
"raw": "* Recipe: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/alignment-handbook",
"href": "https://github.com/huggingface/alignment-handbook",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Hope it's useful to others!",
"raw": "Hope it's useful to others!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Can we align code generation models to be good at chat without compromising their base capabilities 🤔?
This was the question the H4 team asked itself when BigCode released StarCoder2 a bit over a week ago. We knew that code models like https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct and https://huggingface.co/m-a-p/OpenCodeInterpreter-DS-33B get impressive scores on code benchmarks like HumanEval, but they tend to score poorly on chat benchmarks like MT Bench and IFEval. We also knew that the Zephyr recipe we applied to Mistral 7B produced a strong chat model, so we wondered -- could be tweaked to produce a strong coding assistant?
It turns out the answer is yes and I'm happy to share StarChat2, a DPO fine-tune of StarCoder2 15B that scores highly on both HumanEval and MT Bench / IFEval 🌟!
The most interesting lesson for me was that you get better models by blending in more code/math data than chat during the SFT step - in terms of tokens, we found a ratio of 3:1 worked best.
Anyway, here's a demo of the model, along with all the code and datasets we used to train it:
* Demo: https://huggingface.co/spaces/HuggingFaceH4/starchat2-playground
* Collection: https://huggingface.co/collections/HuggingFaceH4/starchat2-15b-65f068417b330fafad751fce
* Recipe: https://github.com/huggingface/alignment-handbook
Hope it's useful to others!
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594651707950-noauth.jpeg",
"fullname": "Lewis Tunstall",
"name": "lewtun",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 678,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f0c746619cb630495b814fd/y5NyAQheTwjNp9OJV_hbc.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f0c746619cb630495b814fd/TlTKEGSsZEauL_lNnMXN1.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"lvwerra",
"kashif",
"osanseviero",
"smangrul",
"jordangong",
"Chunte",
"sasikiran",
"MexIvanov",
"vicgalle",
"medmac01"
],
"count": 10
},
{
"reaction": "👀",
"users": [
"victor"
],
"count": 1
},
{
"reaction": "🔥",
"users": [
"medmac01"
],
"count": 1
}
] | 2024-03-12T19:51:04.000Z | 2024-03-14T02:24:39.361Z | [
{
"avatarUrl": "/avatars/7769fb2bdb05116cd3ea8a112f0beeeb.svg",
"fullname": "Sasi Kiran",
"name": "sasikiran",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594651707950-noauth.jpeg",
"fullname": "Lewis Tunstall",
"name": "lewtun",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 678,
"isFollowing": false
}
] | /posts/lewtun/683697778080709 | 1,808 | 3 |
716968829982789 | [
{
"type": "text",
"value": "🎥 🤾 Vid2Persona: talk to person from video clip",
"raw": "🎥 🤾 Vid2Persona: talk to person from video clip",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "A fun project over the last week with ",
"raw": "A fun project over the last week with ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@sayakpaul",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "sayakpaul",
"label": null,
"lang": null
},
{
"type": "text",
"value": ". It has a simple pipeline from extracting traits of video characters to chatting with them.",
"raw": ". It has a simple pipeline from extracting traits of video characters to chatting with them.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Under the hood, this project leverages the power of both commercial and open source models. We used Google's Gemini 1.0 Pro Vision model to understand the video content directly, then we used HuggingFaceH4/zephyr-7b-beta model to make conversation!",
"raw": "Under the hood, this project leverages the power of both commercial and open source models. We used Google's Gemini 1.0 Pro Vision model to understand the video content directly, then we used HuggingFaceH4/zephyr-7b-beta model to make conversation!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Try it Hugging Face Space and let us know what you think.",
"raw": "Try it Hugging Face Space and let us know what you think.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ": ",
"raw": ": ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/chansung/vid2persona",
"href": null,
"resource": {
"type": "space",
"id": "chansung/vid2persona",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/chansung/vid2persona",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The space application is a dedicated implementation for ZeroGPU environment + Hugging Face Inference API with PRO account. If you wish to host it on your own environment, consider duplicate the space or run locally with the project repository",
"raw": "The space application is a dedicated implementation for ZeroGPU environment + Hugging Face Inference API with PRO account. If you wish to host it on your own environment, consider duplicate the space or run locally with the project repository",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ": ",
"raw": ": ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/deep-diver/Vid2Persona",
"href": "https://github.com/deep-diver/Vid2Persona",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🎥 🤾 Vid2Persona: talk to person from video clip
A fun project over the last week with @sayakpaul. It has a simple pipeline from extracting traits of video characters to chatting with them.
Under the hood, this project leverages the power of both commercial and open source models. We used Google's Gemini 1.0 Pro Vision model to understand the video content directly, then we used HuggingFaceH4/zephyr-7b-beta model to make conversation!
Try it Hugging Face Space and let us know what you think.
: https://huggingface.co/spaces/chansung/vid2persona
The space application is a dedicated implementation for ZeroGPU environment + Hugging Face Inference API with PRO account. If you wish to host it on your own environment, consider duplicate the space or run locally with the project repository
: https://github.com/deep-diver/Vid2Persona | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg",
"fullname": "chansung park",
"name": "chansung",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2695,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649681653581-5f7fbd813e94f16a85448745.jpeg",
"fullname": "Sayak Paul",
"name": "sayakpaul",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 459
}
] | [
{
"reaction": "🤗",
"users": [
"chansung",
"anonyme789",
"Chan-Y",
"victor",
"osanseviero",
"xprilion"
],
"count": 6
},
{
"reaction": "🤯",
"users": [
"anonyme789",
"Srulikbd",
"osanseviero",
"xprilion"
],
"count": 4
}
] | 2024-03-12T00:54:34.000Z | 2024-03-12T00:54:34.113Z | [] | /posts/chansung/716968829982789 | 810 | 0 |
984424866637646 | [
{
"type": "text",
"value": "If you're trying to run MoE Mixtral-8x7b under DeepSpeed w/ HF Transformers it's likely to hang on the first forward.",
"raw": "If you're trying to run MoE Mixtral-8x7b under DeepSpeed w/ HF Transformers it's likely to hang on the first forward.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The solution is here ",
"raw": "The solution is here ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/microsoft/DeepSpeed/pull/4966?_x_tr_sl=auto&_x_tr_tl=en&_x_tr_hl=en-US#issuecomment-1989671378",
"href": "https://github.com/microsoft/DeepSpeed/pull/4966?_x_tr_sl=auto&_x_tr_tl=en&_x_tr_hl=en-US#issuecomment-1989671378",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "and you need deepspeed>=0.13.0",
"raw": "and you need deepspeed>=0.13.0",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Thanks to Masahiro Tanaka for the fix.",
"raw": "Thanks to Masahiro Tanaka for the fix.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | If you're trying to run MoE Mixtral-8x7b under DeepSpeed w/ HF Transformers it's likely to hang on the first forward.
The solution is here https://github.com/microsoft/DeepSpeed/pull/4966?_x_tr_sl=auto&_x_tr_tl=en&_x_tr_hl=en-US#issuecomment-1989671378
and you need deepspeed>=0.13.0
Thanks to Masahiro Tanaka for the fix. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594311341799-5f07383b19cb630495b812cd.jpeg",
"fullname": "Stas Bekman",
"name": "stas",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 97,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"nss-ysasaki",
"sitloboi2012",
"theainerd",
"osanseviero",
"victor",
"muhtasham",
"hangzhang-nlp"
],
"count": 7
}
] | 2024-03-12T00:14:41.000Z | 2024-03-12T00:15:22.852Z | [] | /posts/stas/984424866637646 | 535 | 0 |
188560282332993 | [
{
"type": "text",
"value": "🚀🕺🌟 New Research Alert - AAAI 2024! 🌟💃🚀",
"raw": "🚀🕺🌟 New Research Alert - AAAI 2024! 🌟💃🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: Relightable and Animatable Neural Avatars from Videos 🌟🚀",
"raw": "📄 Title: Relightable and Animatable Neural Avatars from Videos 🌟🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: Relightable & animatable neural avatars from sparse videos.",
"raw": "📝 Description: Relightable & animatable neural avatars from sparse videos.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: Wenbin Lin, Chengwei Zheng, Jun-Hai Yong, and Feng Xu",
"raw": "👥 Authors: Wenbin Lin, Chengwei Zheng, Jun-Hai Yong, and Feng Xu",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📅 Conference: AAAI, February 20-27, 2024 | Vancouver, Canada 🇨🇦",
"raw": "📅 Conference: AAAI, February 20-27, 2024 | Vancouver, Canada 🇨🇦",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Paper: ",
"raw": "🔗 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2312.12877",
"href": null,
"resource": {
"type": "paper",
"id": "2312.12877",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2312.12877",
"code": null,
"user": null,
"label": "Relightable and Animatable Neural Avatars from Videos (2312.12877)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌐 Github Page: ",
"raw": "🌐 Github Page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://wenbin-lin.github.io/RelightableAvatar-page",
"href": "https://wenbin-lin.github.io/RelightableAvatar-page",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📁 Repository: ",
"raw": "📁 Repository: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/wenbin-lin/RelightableAvatar",
"href": "https://github.com/wenbin-lin/RelightableAvatar",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📺 Video: ",
"raw": "📺 Video: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.youtube.com/watch?v=v9rlys0xQGo",
"href": "https://www.youtube.com/watch?v=v9rlys0xQGo",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"href": null,
"resource": {
"type": "space",
"id": "DmitryRyumin/NewEraAI-Papers",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " curated by ",
"raw": " curated by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DmitryRyumin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DmitryRyumin",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Added to the Avatars Collection: ",
"raw": "🚀 Added to the Avatars Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"href": null,
"resource": {
"type": "collection",
"id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"discussionNum": null
},
"url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 Added to the AAAI 2024 Papers: ",
"raw": "📚 Added to the AAAI 2024 Papers: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/DmitryRyumin/AAAI-2024-Papers",
"href": "https://github.com/DmitryRyumin/AAAI-2024-Papers",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #NeuralAvatar #RelightableAvatars #AnimatableAvatars #3DModeling #PhotorealisticRendering #ShadowModeling #DigitalAvatars #GeometryModeling #AAAI2024 #DeepLearning #Animation #Innovation",
"raw": "🔍 Keywords: #NeuralAvatar #RelightableAvatars #AnimatableAvatars #3DModeling #PhotorealisticRendering #ShadowModeling #DigitalAvatars #GeometryModeling #AAAI2024 #DeepLearning #Animation #Innovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀🕺🌟 New Research Alert - AAAI 2024! 🌟💃🚀
📄 Title: Relightable and Animatable Neural Avatars from Videos 🌟🚀
📝 Description: Relightable & animatable neural avatars from sparse videos.
👥 Authors: Wenbin Lin, Chengwei Zheng, Jun-Hai Yong, and Feng Xu
📅 Conference: AAAI, February 20-27, 2024 | Vancouver, Canada 🇨🇦
🔗 Paper: https://huggingface.co/papers/2312.12877
🌐 Github Page: https://wenbin-lin.github.io/RelightableAvatar-page
📁 Repository: https://github.com/wenbin-lin/RelightableAvatar
📺 Video: https://www.youtube.com/watch?v=v9rlys0xQGo
📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin
🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36
📚 Added to the AAAI 2024 Papers: https://github.com/DmitryRyumin/AAAI-2024-Papers
🔍 Keywords: #NeuralAvatar #RelightableAvatars #AnimatableAvatars #3DModeling #PhotorealisticRendering #ShadowModeling #DigitalAvatars #GeometryModeling #AAAI2024 #DeepLearning #Animation #Innovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/LoYx0_jpXx8VsmFxmLvD3.png"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/jkZid-IoTAKGZqY1airI5.mp4"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/HDhU2r7xjVO3i5VhkqYXi.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/L7_KilMltwIpDs8T1oMET.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377
}
] | [
{
"reaction": "❤️",
"users": [
"DmitryRyumin",
"samusenps",
"osanseviero",
"sitloboi2012",
"Lician"
],
"count": 5
},
{
"reaction": "👍",
"users": [
"samusenps",
"dashfunnydashdash"
],
"count": 2
},
{
"reaction": "😎",
"users": [
"victor"
],
"count": 1
}
] | 2024-03-11T21:05:52.000Z | 2024-03-11T23:26:25.187Z | [] | /posts/DmitryRyumin/188560282332993 | 109 | 0 |
400007373757162 | [
{
"type": "text",
"value": "671 authors 🤯",
"raw": "671 authors 🤯",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.05530",
"href": null,
"resource": {
"type": "paper",
"id": "2403.05530",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.05530",
"code": null,
"user": null,
"label": "Gemini 1.5: Unlocking multimodal understanding across millions of tokens\n of context (2403.05530)",
"lang": null
},
{
"type": "text",
"value": " Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context",
"raw": " Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 671 authors 🤯
https://huggingface.co/papers/2403.05530 Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg",
"fullname": "Julien Chaumond",
"name": "julien-c",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1580,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"osanseviero",
"muhtasham",
"dvilasuero",
"pabloce",
"diwank"
],
"count": 5
}
] | 2024-03-11T17:05:07.000Z | 2024-03-13T11:49:42.217Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 138,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg",
"fullname": "Julien Chaumond",
"name": "julien-c",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1580,
"isFollowing": false
}
] | /posts/julien-c/400007373757162 | 593 | 2 |
391996089229129 | [
{
"type": "text",
"value": "New foundation model on document understanding and generation in transformers 🤩",
"raw": "New foundation model on document understanding and generation in transformers 🤩",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "UDOP by MSFT is a bleeding-edge model that is capable of many tasks, including question answering, document editing and more! 🤯",
"raw": "UDOP by MSFT is a bleeding-edge model that is capable of many tasks, including question answering, document editing and more! 🤯",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo 👉 ",
"raw": "Demo 👉 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/merve/UDOP",
"href": null,
"resource": {
"type": "space",
"id": "merve/UDOP",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/merve/UDOP",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It is a model that combines vision, text and layout. 📝",
"raw": "It is a model that combines vision, text and layout. 📝",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This model is very interesting because the input representation truly captures the nature of the document modality: text, where the text is, and the layout of the document matters!",
"raw": "This model is very interesting because the input representation truly captures the nature of the document modality: text, where the text is, and the layout of the document matters!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If you know T5, it resembles that: it's pre-trained on both self-supervised and supervised objectives over text, image and layout.",
"raw": "If you know T5, it resembles that: it's pre-trained on both self-supervised and supervised objectives over text, image and layout.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "To switch between tasks, one simply needs to change the task specific prompt at the beginning, e.g. for QA, one prepends with Question answering. ",
"raw": "To switch between tasks, one simply needs to change the task specific prompt at the beginning, e.g. for QA, one prepends with Question answering. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "As for the architecture, it's like T5, except it has a single encoder that takes in text, image and layout, and two decoders (text-layout and vision decoders) combined into one.",
"raw": "As for the architecture, it's like T5, except it has a single encoder that takes in text, image and layout, and two decoders (text-layout and vision decoders) combined into one.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The vision decoder is a masked autoencoder (thus the capabilities of document editing).",
"raw": "The vision decoder is a masked autoencoder (thus the capabilities of document editing).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "For me, the most interesting capability is document reconstruction, document editing and layout re-arrangement. This decoder isn't released though because it could be used maliciously to fake document editing.",
"raw": "For me, the most interesting capability is document reconstruction, document editing and layout re-arrangement. This decoder isn't released though because it could be used maliciously to fake document editing.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Overall, the model performs very well on document understanding benchmark (DUE) and also information extraction (FUNSD, CORD) and classification (RVL-CDIP) for vision, text, layout modalities.",
"raw": "Overall, the model performs very well on document understanding benchmark (DUE) and also information extraction (FUNSD, CORD) and classification (RVL-CDIP) for vision, text, layout modalities.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can learn more about the model from below resources (h/t to ",
"raw": "You can learn more about the model from below resources (h/t to ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@nielsr",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "nielsr",
"label": null,
"lang": null
},
{
"type": "text",
"value": "), thanks a lot for reading 🤗",
"raw": "), thanks a lot for reading 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Docs: ",
"raw": "Docs: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/docs/transformers/main/en/model_doc/udop",
"href": "https://huggingface.co/docs/transformers/main/en/model_doc/udop",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " 📚 ",
"raw": " 📚 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Checkpoints: ",
"raw": "Checkpoints: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/microsoft/udop-65e625124aee97415b88b513",
"href": null,
"resource": {
"type": "collection",
"id": "microsoft/udop-65e625124aee97415b88b513",
"discussionNum": null
},
"url": "https://huggingface.co/collections/microsoft/udop-65e625124aee97415b88b513",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo notebooks: ",
"raw": "Demo notebooks: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/NielsRogge/Transformers-Tutorials/tree/master/UDOP",
"href": "https://github.com/NielsRogge/Transformers-Tutorials/tree/master/UDOP",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " 📕",
"raw": " 📕",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | New foundation model on document understanding and generation in transformers 🤩
UDOP by MSFT is a bleeding-edge model that is capable of many tasks, including question answering, document editing and more! 🤯
Demo 👉 https://huggingface.co/spaces/merve/UDOP
It is a model that combines vision, text and layout. 📝
This model is very interesting because the input representation truly captures the nature of the document modality: text, where the text is, and the layout of the document matters!
If you know T5, it resembles that: it's pre-trained on both self-supervised and supervised objectives over text, image and layout.
To switch between tasks, one simply needs to change the task specific prompt at the beginning, e.g. for QA, one prepends with Question answering.
As for the architecture, it's like T5, except it has a single encoder that takes in text, image and layout, and two decoders (text-layout and vision decoders) combined into one.
The vision decoder is a masked autoencoder (thus the capabilities of document editing).
For me, the most interesting capability is document reconstruction, document editing and layout re-arrangement. This decoder isn't released though because it could be used maliciously to fake document editing.
Overall, the model performs very well on document understanding benchmark (DUE) and also information extraction (FUNSD, CORD) and classification (RVL-CDIP) for vision, text, layout modalities.
You can learn more about the model from below resources (h/t to
@nielsr), thanks a lot for reading 🤗
Docs: https://huggingface.co/docs/transformers/main/en/model_doc/udop 📚
Checkpoints: https://huggingface.co/collections/microsoft/udop-65e625124aee97415b88b513
Demo notebooks: https://github.com/NielsRogge/Transformers-Tutorials/tree/master/UDOP 📕 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5589,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/98QyX-MW_OkbIdDZbSa3M.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608042047613-5f1158120c833276f61f1a84.jpeg",
"fullname": "Niels Rogge",
"name": "nielsr",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 680
}
] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"rajistics",
"not-lain",
"cstr",
"diwank"
],
"count": 5
}
] | 2024-03-11T16:59:02.000Z | 2024-03-11T16:59:25.239Z | [] | /posts/merve/391996089229129 | 487 | 0 |
263130686669707 | [
{
"type": "text",
"value": "🔍 Today's pick in Interpretability & Analysis of LMs: Information Flow Routes: Automatically Interpreting Language Models at Scale by ",
"raw": "🔍 Today's pick in Interpretability & Analysis of LMs: Information Flow Routes: Automatically Interpreting Language Models at Scale by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@javifer",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "javifer",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@lena-voita",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "lena-voita",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This work presents a novel method to identify salient components in Transformer-based language models by decomposing the contribution of various model components into the residual stream.",
"raw": "This work presents a novel method to identify salient components in Transformer-based language models by decomposing the contribution of various model components into the residual stream.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This method is more efficient and scalable than previous techniques such as activation patching, as it only requires a single forward pass through the model to identify critical information flow paths. Moreover, it can be applied without a contrastive template, which is observed to produce results dependent on the selected contrastive example for activation patching.",
"raw": "This method is more efficient and scalable than previous techniques such as activation patching, as it only requires a single forward pass through the model to identify critical information flow paths. Moreover, it can be applied without a contrastive template, which is observed to produce results dependent on the selected contrastive example for activation patching.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Information flow routes are applied to Llama 2, showing that:",
"raw": "Information flow routes are applied to Llama 2, showing that:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "1. Models show “typical” information flow routes for non-content words, while content words don’t exhibit such patterns.",
"raw": "1. Models show “typical” information flow routes for non-content words, while content words don’t exhibit such patterns.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "2. Feedforward networks are more active in the bottom layers of the network (where e.g. subject enrichment is performed) and in very last layer.",
"raw": "2. Feedforward networks are more active in the bottom layers of the network (where e.g. subject enrichment is performed) and in very last layer.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "3. Positional and subword-merging attention heads are among the most active and important throughout the network.",
"raw": "3. Positional and subword-merging attention heads are among the most active and important throughout the network.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "4. Periods can be treated by the model as BOS tokens by leaving their residual representation mostly untouched during the forward pass.",
"raw": "4. Periods can be treated by the model as BOS tokens by leaving their residual representation mostly untouched during the forward pass.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Finally, the paper also demonstrates that some model components are specialized for specific domains, such as coding or multilingual texts, suggesting a high degree of modularity in the network. The contribution of domain-specific heads obtained by projecting right singular values of the OV circuit to the unembedding matrix show highly interpretable concepts being handled in granular model components.",
"raw": "Finally, the paper also demonstrates that some model components are specialized for specific domains, such as coding or multilingual texts, suggesting a high degree of modularity in the network. The contribution of domain-specific heads obtained by projecting right singular values of the OV circuit to the unembedding matrix show highly interpretable concepts being handled in granular model components.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Paper: ",
"raw": "📄 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.00824",
"href": null,
"resource": {
"type": "paper",
"id": "2403.00824",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.00824",
"code": null,
"user": null,
"label": "Information Flow Routes: Automatically Interpreting Language Models at\n Scale (2403.00824)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 All daily picks: ",
"raw": "🔍 All daily picks: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9",
"href": null,
"resource": {
"type": "collection",
"id": "gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9",
"discussionNum": null
},
"url": "https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🔍 Today's pick in Interpretability & Analysis of LMs: Information Flow Routes: Automatically Interpreting Language Models at Scale by @javifer @lena-voita
This work presents a novel method to identify salient components in Transformer-based language models by decomposing the contribution of various model components into the residual stream.
This method is more efficient and scalable than previous techniques such as activation patching, as it only requires a single forward pass through the model to identify critical information flow paths. Moreover, it can be applied without a contrastive template, which is observed to produce results dependent on the selected contrastive example for activation patching.
Information flow routes are applied to Llama 2, showing that:
1. Models show “typical” information flow routes for non-content words, while content words don’t exhibit such patterns.
2. Feedforward networks are more active in the bottom layers of the network (where e.g. subject enrichment is performed) and in very last layer.
3. Positional and subword-merging attention heads are among the most active and important throughout the network.
4. Periods can be treated by the model as BOS tokens by leaving their residual representation mostly untouched during the forward pass.
Finally, the paper also demonstrates that some model components are specialized for specific domains, such as coding or multilingual texts, suggesting a high degree of modularity in the network. The contribution of domain-specific heads obtained by projecting right singular values of the OV circuit to the unembedding matrix show highly interpretable concepts being handled in granular model components.
📄 Paper: https://huggingface.co/papers/2403.00824
🔍 All daily picks: https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1670231290373-5e7749883d77a72421292d07.jpeg",
"fullname": "Gabriele Sarti",
"name": "gsarti",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/5Kmgr3KDK4Zqf4PuzfJmb.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/DOvQlqgcC-WZZ_BNwwh3C.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/-ck9Fnodt4mYZp0_-DcW6.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/lXyBbuRqHnQU9ZLdtbrPr.png"
}
] | [
{
"avatarUrl": "/avatars/bcc94a31fab7486ca9d018245a289fb0.svg",
"fullname": "Javier Ferrando",
"name": "javifer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5
},
{
"avatarUrl": "/avatars/d5e02ca2e88bb9b17f92703927a34df4.svg",
"fullname": "Elena Voita",
"name": "lena-voita",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2
}
] | [
{
"reaction": "❤️",
"users": [
"Kukedlc",
"samusenps",
"javifer",
"CristianJD",
"Theli"
],
"count": 5
},
{
"reaction": "🔥",
"users": [
"eramax",
"mmhamdy"
],
"count": 2
}
] | 2024-03-11T16:40:50.000Z | 2024-03-11T19:15:46.515Z | [] | /posts/gsarti/263130686669707 | 158 | 0 |
915464780192455 | [
{
"type": "text",
"value": "A recent paper titled \"ShortGPT: Layers in Large Language Models are More Redundant Than You Expect\" proposes a simple and effective approach to pruning Large Language Models (LLMs) by removing redundant layers. ",
"raw": "A recent paper titled \"ShortGPT: Layers in Large Language Models are More Redundant Than You Expect\" proposes a simple and effective approach to pruning Large Language Models (LLMs) by removing redundant layers. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Key points:",
"raw": "Key points:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Discovers significant redundancy across layers in LLMs, with some layers playing a negligible role for the final performance.",
"raw": "* Discovers significant redundancy across layers in LLMs, with some layers playing a negligible role for the final performance.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Defines a new metric called Block Influence (BI) to quantify the importance of each layer in an LLM.",
"raw": "* Defines a new metric called Block Influence (BI) to quantify the importance of each layer in an LLM.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Removes layers with low BI scores, achieving up to 25% reduction in parameters and computation while maintaining 92% of the LLM's performance.",
"raw": "* Removes layers with low BI scores, achieving up to 25% reduction in parameters and computation while maintaining 92% of the LLM's performance.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Congrats to the authors for their work!",
"raw": "Congrats to the authors for their work!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.03853",
"href": null,
"resource": {
"type": "paper",
"id": "2403.03853",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.03853",
"code": null,
"user": null,
"label": "ShortGPT: Layers in Large Language Models are More Redundant Than You\n Expect (2403.03853)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | A recent paper titled "ShortGPT: Layers in Large Language Models are More Redundant Than You Expect" proposes a simple and effective approach to pruning Large Language Models (LLMs) by removing redundant layers.
Key points:
* Discovers significant redundancy across layers in LLMs, with some layers playing a negligible role for the final performance.
* Defines a new metric called Block Influence (BI) to quantify the importance of each layer in an LLM.
* Removes layers with low BI scores, achieving up to 25% reduction in parameters and computation while maintaining 92% of the LLM's performance.
Congrats to the authors for their work!
Paper: https://huggingface.co/papers/2403.03853
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg",
"fullname": "Vlad Bogolin",
"name": "vladbogo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 109,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/OgMvLzFc5c89hQ8mHCVaP.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/ONUWKtYl-6OYCwXO8SQ3Q.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"Dlbk",
"emran86",
"dev7halo",
"MexIvanov",
"Pretam"
],
"count": 5
},
{
"reaction": "❤️",
"users": [
"emran86"
],
"count": 1
}
] | 2024-03-11T15:01:48.000Z | 2024-03-11T15:01:48.342Z | [] | /posts/vladbogo/915464780192455 | 100 | 0 |
349712597113481 | [
{
"type": "text",
"value": "ELLA",
"raw": "ELLA",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Equip Diffusion Models with LLM for Enhanced Semantic Alignment",
"raw": "Equip Diffusion Models with LLM for Enhanced Semantic Alignment",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.05135",
"href": null,
"resource": {
"type": "paper",
"id": "2403.05135",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.05135",
"code": null,
"user": null,
"label": "ELLA: Equip Diffusion Models with LLM for Enhanced Semantic Alignment (2403.05135)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Diffusion models have demonstrated remarkable performance in the domain of text-to-image generation. However, most widely used models still employ CLIP as their text encoder, which constrains their ability to comprehend dense prompts, encompassing multiple objects, detailed attributes, complex relationships, long-text alignment, etc. In this paper, we introduce an Efficient Large Language Model Adapter, termed ELLA, which equips text-to-image diffusion models with powerful Large Language Models (LLM) to enhance text alignment without training of either U-Net or LLM. To seamlessly bridge two pre-trained models, we investigate a range of semantic alignment connector designs and propose a novel module, the Timestep-Aware Semantic Connector (TSC), which dynamically extracts timestep-dependent conditions from LLM. Our approach adapts semantic features at different stages of the denoising process, assisting diffusion models in interpreting lengthy and intricate prompts over sampling timesteps. Additionally, ELLA can be readily incorporated with community models and tools to improve their prompt-following capabilities. To assess text-to-image models in dense prompt following, we introduce Dense Prompt Graph Benchmark (DPG-Bench), a challenging benchmark consisting of 1K dense prompts. Extensive experiments demonstrate the superiority of ELLA in dense prompt following compared to state-of-the-art methods, particularly in multiple object compositions involving diverse attributes and relationships.",
"raw": "Diffusion models have demonstrated remarkable performance in the domain of text-to-image generation. However, most widely used models still employ CLIP as their text encoder, which constrains their ability to comprehend dense prompts, encompassing multiple objects, detailed attributes, complex relationships, long-text alignment, etc. In this paper, we introduce an Efficient Large Language Model Adapter, termed ELLA, which equips text-to-image diffusion models with powerful Large Language Models (LLM) to enhance text alignment without training of either U-Net or LLM. To seamlessly bridge two pre-trained models, we investigate a range of semantic alignment connector designs and propose a novel module, the Timestep-Aware Semantic Connector (TSC), which dynamically extracts timestep-dependent conditions from LLM. Our approach adapts semantic features at different stages of the denoising process, assisting diffusion models in interpreting lengthy and intricate prompts over sampling timesteps. Additionally, ELLA can be readily incorporated with community models and tools to improve their prompt-following capabilities. To assess text-to-image models in dense prompt following, we introduce Dense Prompt Graph Benchmark (DPG-Bench), a challenging benchmark consisting of 1K dense prompts. Extensive experiments demonstrate the superiority of ELLA in dense prompt following compared to state-of-the-art methods, particularly in multiple object compositions involving diverse attributes and relationships.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | ELLA
Equip Diffusion Models with LLM for Enhanced Semantic Alignment
https://huggingface.co/papers/2403.05135
Diffusion models have demonstrated remarkable performance in the domain of text-to-image generation. However, most widely used models still employ CLIP as their text encoder, which constrains their ability to comprehend dense prompts, encompassing multiple objects, detailed attributes, complex relationships, long-text alignment, etc. In this paper, we introduce an Efficient Large Language Model Adapter, termed ELLA, which equips text-to-image diffusion models with powerful Large Language Models (LLM) to enhance text alignment without training of either U-Net or LLM. To seamlessly bridge two pre-trained models, we investigate a range of semantic alignment connector designs and propose a novel module, the Timestep-Aware Semantic Connector (TSC), which dynamically extracts timestep-dependent conditions from LLM. Our approach adapts semantic features at different stages of the denoising process, assisting diffusion models in interpreting lengthy and intricate prompts over sampling timesteps. Additionally, ELLA can be readily incorporated with community models and tools to improve their prompt-following capabilities. To assess text-to-image models in dense prompt following, we introduce Dense Prompt Graph Benchmark (DPG-Bench), a challenging benchmark consisting of 1K dense prompts. Extensive experiments demonstrate the superiority of ELLA in dense prompt following compared to state-of-the-art methods, particularly in multiple object compositions involving diverse attributes and relationships.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/AO09kvhH-AfnF_xCUG-CX.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"vladbogo",
"Dlbk",
"samusenps",
"ClayFace",
"Zmu",
"melohux",
"Benson"
],
"count": 7
},
{
"reaction": "❤️",
"users": [
"melohux",
"bmorphism"
],
"count": 2
}
] | 2024-03-11T14:07:56.000Z | 2024-03-11T14:07:56.218Z | [] | /posts/akhaliq/349712597113481 | 175 | 0 |
835823846472163 | [
{
"type": "text",
"value": "Super work from ",
"raw": "Super work from ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DeepMount00",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DeepMount00",
"label": null,
"lang": null
},
{
"type": "text",
"value": ":",
"raw": ":",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 𝐃𝐢𝐬𝐜𝐨𝐯𝐞𝐫 𝐔𝐧𝐢𝐯𝐞𝐫𝐬𝐚𝐥 𝐍𝐞𝐫: 𝐀 𝐆𝐥𝐢𝐍𝐞𝐫-𝐁𝐚𝐬𝐞𝐝 𝐈𝐭𝐚𝐥𝐢𝐚𝐧 𝐍𝐄𝐑",
"raw": "🚀 𝐃𝐢𝐬𝐜𝐨𝐯𝐞𝐫 𝐔𝐧𝐢𝐯𝐞𝐫𝐬𝐚𝐥 𝐍𝐞𝐫: 𝐀 𝐆𝐥𝐢𝐍𝐞𝐫-𝐁𝐚𝐬𝐞𝐝 𝐈𝐭𝐚𝐥𝐢𝐚𝐧 𝐍𝐄𝐑",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Introducing 𝐔𝐧𝐢𝐯𝐞𝐫𝐬𝐚𝐥 𝐍𝐞𝐫 𝐟𝐨𝐫 𝐈𝐭𝐚𝐥𝐢𝐚𝐧 𝐋𝐚𝐧𝐠𝐮𝐚𝐠𝐞, a revolutionary Named Entity Recognition (NER) model evolved from the GliNer architecture and meticulously tailored for the Italian language. This advanced model is a beacon of efficiency and versatility, engineered to 𝐫𝐞𝐜𝐨𝐠𝐧𝐢𝐳𝐞 𝐚𝐧𝐲 𝐞𝐧𝐭𝐢𝐭𝐲 𝐭𝐲𝐩𝐞 within the rich nuances of Italian, using a bidirectional transformer encoder. It stands out as an ideal solution for those navigating the challenges of resource-limited environments or seeking an efficient alternative to the cumbersome Large Language Models (LLMs).",
"raw": "Introducing 𝐔𝐧𝐢𝐯𝐞𝐫𝐬𝐚𝐥 𝐍𝐞𝐫 𝐟𝐨𝐫 𝐈𝐭𝐚𝐥𝐢𝐚𝐧 𝐋𝐚𝐧𝐠𝐮𝐚𝐠𝐞, a revolutionary Named Entity Recognition (NER) model evolved from the GliNer architecture and meticulously tailored for the Italian language. This advanced model is a beacon of efficiency and versatility, engineered to 𝐫𝐞𝐜𝐨𝐠𝐧𝐢𝐳𝐞 𝐚𝐧𝐲 𝐞𝐧𝐭𝐢𝐭𝐲 𝐭𝐲𝐩𝐞 within the rich nuances of Italian, using a bidirectional transformer encoder. It stands out as an ideal solution for those navigating the challenges of resource-limited environments or seeking an efficient alternative to the cumbersome Large Language Models (LLMs).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "𝐑𝐮𝐧𝐬 𝐟𝐚𝐬𝐭 𝐚𝐥𝐬𝐨 𝐨𝐧 𝐂𝐏𝐔!",
"raw": "𝐑𝐮𝐧𝐬 𝐟𝐚𝐬𝐭 𝐚𝐥𝐬𝐨 𝐨𝐧 𝐂𝐏𝐔!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Experience this Italian-focused innovation live on Hugging Face Spaces:",
"raw": "Experience this Italian-focused innovation live on Hugging Face Spaces:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DeepMount00/universal_ner_ita",
"href": null,
"resource": {
"type": "space",
"id": "DeepMount00/universal_ner_ita",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DeepMount00/universal_ner_ita",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2311.08526",
"href": "https://arxiv.org/abs/2311.08526",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " Urchade Zaratiana et all. great work!",
"raw": " Urchade Zaratiana et all. great work!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Super work from @DeepMount00:
🚀 𝐃𝐢𝐬𝐜𝐨𝐯𝐞𝐫 𝐔𝐧𝐢𝐯𝐞𝐫𝐬𝐚𝐥 𝐍𝐞𝐫: 𝐀 𝐆𝐥𝐢𝐍𝐞𝐫-𝐁𝐚𝐬𝐞𝐝 𝐈𝐭𝐚𝐥𝐢𝐚𝐧 𝐍𝐄𝐑
Introducing 𝐔𝐧𝐢𝐯𝐞𝐫𝐬𝐚𝐥 𝐍𝐞𝐫 𝐟𝐨𝐫 𝐈𝐭𝐚𝐥𝐢𝐚𝐧 𝐋𝐚𝐧𝐠𝐮𝐚𝐠𝐞, a revolutionary Named Entity Recognition (NER) model evolved from the GliNer architecture and meticulously tailored for the Italian language. This advanced model is a beacon of efficiency and versatility, engineered to 𝐫𝐞𝐜𝐨𝐠𝐧𝐢𝐳𝐞 𝐚𝐧𝐲 𝐞𝐧𝐭𝐢𝐭𝐲 𝐭𝐲𝐩𝐞 within the rich nuances of Italian, using a bidirectional transformer encoder. It stands out as an ideal solution for those navigating the challenges of resource-limited environments or seeking an efficient alternative to the cumbersome Large Language Models (LLMs).
𝐑𝐮𝐧𝐬 𝐟𝐚𝐬𝐭 𝐚𝐥𝐬𝐨 𝐨𝐧 𝐂𝐏𝐔!
Experience this Italian-focused innovation live on Hugging Face Spaces:
https://huggingface.co/spaces/DeepMount00/universal_ner_ita
Paper: https://arxiv.org/abs/2311.08526 Urchade Zaratiana et all. great work! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fef4eb7770b06e11c2c6381/1NMdigjCGtn0yvQZSi5NJ.png",
"fullname": "Alessandro Ercolani",
"name": "giux78",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 44,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64f1bf6a8b550e875926a590/xdZHPQGdI2jISWcKhWTMQ.png",
"fullname": "Michele Montebovi",
"name": "DeepMount00",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 123
}
] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"Dlbk",
"DeepMount00",
"urchade",
"giux78",
"tomaarsen",
"MoritzLaurer"
],
"count": 8
}
] | 2024-03-11T13:22:40.000Z | 2024-03-14T10:10:31.401Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64f1bf6a8b550e875926a590/xdZHPQGdI2jISWcKhWTMQ.png",
"fullname": "Michele Montebovi",
"name": "DeepMount00",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 123,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png",
"fullname": "Tom Aarsen",
"name": "tomaarsen",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1060,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fef4eb7770b06e11c2c6381/1NMdigjCGtn0yvQZSi5NJ.png",
"fullname": "Alessandro Ercolani",
"name": "giux78",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 44,
"isFollowing": false
}
] | /posts/giux78/835823846472163 | 79 | 3 |
533611740937860 | [
{
"type": "text",
"value": "Good time to drop some 🤗 in the comments > ",
"raw": "Good time to drop some 🤗 in the comments > ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://x.com/elonmusk/status/1767108624038449405",
"href": "https://x.com/elonmusk/status/1767108624038449405",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Good time to drop some 🤗 in the comments > https://x.com/elonmusk/status/1767108624038449405 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63893d4c184615e463aa24b8/S1flsX_26OF6ZJBVcPlaf.jpeg",
"fullname": "Matt Valoatto",
"name": "mvaloatto",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 56,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63893d4c184615e463aa24b8/dJaDhKsIrSNQfGDj_UNpV.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"samusenps",
"Dlbk",
"sa8",
"frntn",
"clefourrier",
"Zmu",
"CKeibel",
"lvalue",
"clem"
],
"count": 9
}
] | 2024-03-11T12:27:59.000Z | 2024-03-12T04:19:22.813Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6538119803519fddb4a17e10/ffJMkdx-rM7VvLTCM6ri_.jpeg",
"fullname": "samusenps",
"name": "samusenps",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 91,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63893d4c184615e463aa24b8/S1flsX_26OF6ZJBVcPlaf.jpeg",
"fullname": "Matt Valoatto",
"name": "mvaloatto",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 56,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 138,
"isFollowing": false
}
] | /posts/mvaloatto/533611740937860 | 268 | 4 |
921922345187362 | [
{
"type": "text",
"value": "Some papers deserve a standing ovation after reading, “Direct Preference Optimization: Your Language Model is Secretly a Reward Model” is one such paper:",
"raw": "Some papers deserve a standing ovation after reading, “Direct Preference Optimization: Your Language Model is Secretly a Reward Model” is one such paper:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "One major drawback of LLMs is the lack of precise control over their behavior which makes it very difficult to align with desired outcomes. Existing methods to mitigate this involves gathering generated, humanly labeled data and fine-tuning the unsupervised LLM to align with preferences - this is known as Reinforcement Learning From Human Feedback (RLHF).",
"raw": "One major drawback of LLMs is the lack of precise control over their behavior which makes it very difficult to align with desired outcomes. Existing methods to mitigate this involves gathering generated, humanly labeled data and fine-tuning the unsupervised LLM to align with preferences - this is known as Reinforcement Learning From Human Feedback (RLHF).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "RLHF is an incredibly complex, usually unstable and computationally costly method. It involves first scaling a suitable reward model that meets human preferences then fine-tuning the language model with RL to maximize the estimated reward while maintaining a major part of the original model.",
"raw": "RLHF is an incredibly complex, usually unstable and computationally costly method. It involves first scaling a suitable reward model that meets human preferences then fine-tuning the language model with RL to maximize the estimated reward while maintaining a major part of the original model.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This paper introduces a new algorithm called Direct Preference Optimization (DPO) that simplifies the whole process. In short, it directly optimizes the LM without explicit reward modeling or reinforcement learning. This is achieved by leveraging a mapping between reward functions and optimal policies, allowing the constrained reward maximization problem to be optimized exactly with a single stage of policy training.",
"raw": "This paper introduces a new algorithm called Direct Preference Optimization (DPO) that simplifies the whole process. In short, it directly optimizes the LM without explicit reward modeling or reinforcement learning. This is achieved by leveraging a mapping between reward functions and optimal policies, allowing the constrained reward maximization problem to be optimized exactly with a single stage of policy training.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "DPO’s genius lies in its ability to intuitively increase the relative log probability of preferred to \"unpreferred\" responses.",
"raw": "DPO’s genius lies in its ability to intuitively increase the relative log probability of preferred to \"unpreferred\" responses.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The amazing thing about this paper is how fundamentally self-proven it is - from clearly stating the problem to explicitly explaining the underlying theory backed with mathematical proofs, it’s just genius.",
"raw": "The amazing thing about this paper is how fundamentally self-proven it is - from clearly stating the problem to explicitly explaining the underlying theory backed with mathematical proofs, it’s just genius.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "In my opinion, every academic research paper should follow this approach. It won the 2023 NeurIPS Outstanding paper award (Category: Outstanding Main Track Runner-Ups).",
"raw": "In my opinion, every academic research paper should follow this approach. It won the 2023 NeurIPS Outstanding paper award (Category: Outstanding Main Track Runner-Ups).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Some papers deserve a standing ovation after reading, “Direct Preference Optimization: Your Language Model is Secretly a Reward Model” is one such paper:
One major drawback of LLMs is the lack of precise control over their behavior which makes it very difficult to align with desired outcomes. Existing methods to mitigate this involves gathering generated, humanly labeled data and fine-tuning the unsupervised LLM to align with preferences - this is known as Reinforcement Learning From Human Feedback (RLHF).
RLHF is an incredibly complex, usually unstable and computationally costly method. It involves first scaling a suitable reward model that meets human preferences then fine-tuning the language model with RL to maximize the estimated reward while maintaining a major part of the original model.
This paper introduces a new algorithm called Direct Preference Optimization (DPO) that simplifies the whole process. In short, it directly optimizes the LM without explicit reward modeling or reinforcement learning. This is achieved by leveraging a mapping between reward functions and optimal policies, allowing the constrained reward maximization problem to be optimized exactly with a single stage of policy training.
DPO’s genius lies in its ability to intuitively increase the relative log probability of preferred to "unpreferred" responses.
The amazing thing about this paper is how fundamentally self-proven it is - from clearly stating the problem to explicitly explaining the underlying theory backed with mathematical proofs, it’s just genius.
In my opinion, every academic research paper should follow this approach. It won the 2023 NeurIPS Outstanding paper award (Category: Outstanding Main Track Runner-Ups). | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/yQKEg2Ew8h9-AGGxfQq9z.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/b_oF29p0Z-k7RtKg9Z2pO.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/ZwNSULLvGuu3wb_nH_Q2_.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/ImodtXVn2nzDdUsvI-H75.jpeg"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"osanseviero",
"victor",
"stonestack",
"kawinm",
"Theli",
"AI-B"
],
"count": 6
}
] | 2024-03-11T08:12:04.000Z | 2024-03-11T08:12:04.348Z | [] | /posts/Jaward/921922345187362 | 49 | 0 |
265811604112635 | [
{
"type": "text",
"value": "🚀⭐️Introducing our new survey \"Leveraging Biomolecule and Natural Language through Multi-Modal Learning: A Survey\"",
"raw": "🚀⭐️Introducing our new survey \"Leveraging Biomolecule and Natural Language through Multi-Modal Learning: A Survey\"",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "arxiv: ",
"raw": "arxiv: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2403.01528",
"href": "https://arxiv.org/abs/2403.01528",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "github: ",
"raw": "github: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/QizhiPei/Awesome-Biomolecule-Language-Cross-Modeling",
"href": "https://github.com/QizhiPei/Awesome-Biomolecule-Language-Cross-Modeling",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The integration of biomolecular modeling with natural language (BL) has emerged as a promising interdisciplinary area at the intersection of artificial intelligence, chemistry and biology. This approach leverages the rich, multifaceted descriptions of biomolecules contained within textual data sources to enhance our fundamental understanding and enable downstream computational tasks such as biomolecule property prediction. The fusion of the nuanced narratives expressed through natural language with the structural and functional specifics of biomolecules described via various molecular modeling techniques opens new avenues for comprehensively representing and analyzing biomolecules. By incorporating the contextual language data that surrounds biomolecules into their modeling, BL aims to capture a holistic view encompassing both the symbolic qualities conveyed through language as well as quantitative structural characteristics. In this review, we provide an extensive analysis of recent advancements achieved through cross modeling of biomolecules and natural language.",
"raw": "The integration of biomolecular modeling with natural language (BL) has emerged as a promising interdisciplinary area at the intersection of artificial intelligence, chemistry and biology. This approach leverages the rich, multifaceted descriptions of biomolecules contained within textual data sources to enhance our fundamental understanding and enable downstream computational tasks such as biomolecule property prediction. The fusion of the nuanced narratives expressed through natural language with the structural and functional specifics of biomolecules described via various molecular modeling techniques opens new avenues for comprehensively representing and analyzing biomolecules. By incorporating the contextual language data that surrounds biomolecules into their modeling, BL aims to capture a holistic view encompassing both the symbolic qualities conveyed through language as well as quantitative structural characteristics. In this review, we provide an extensive analysis of recent advancements achieved through cross modeling of biomolecules and natural language.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀⭐️Introducing our new survey "Leveraging Biomolecule and Natural Language through Multi-Modal Learning: A Survey"
arxiv: https://arxiv.org/abs/2403.01528
github: https://github.com/QizhiPei/Awesome-Biomolecule-Language-Cross-Modeling
The integration of biomolecular modeling with natural language (BL) has emerged as a promising interdisciplinary area at the intersection of artificial intelligence, chemistry and biology. This approach leverages the rich, multifaceted descriptions of biomolecules contained within textual data sources to enhance our fundamental understanding and enable downstream computational tasks such as biomolecule property prediction. The fusion of the nuanced narratives expressed through natural language with the structural and functional specifics of biomolecules described via various molecular modeling techniques opens new avenues for comprehensively representing and analyzing biomolecules. By incorporating the contextual language data that surrounds biomolecules into their modeling, BL aims to capture a holistic view encompassing both the symbolic qualities conveyed through language as well as quantitative structural characteristics. In this review, we provide an extensive analysis of recent advancements achieved through cross modeling of biomolecules and natural language.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6397f6081323f19c578f142e/it7FYYKjlLX8wSsMLm8EO.jpeg",
"fullname": "QizhiPei",
"name": "QizhiPei",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 11,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"QizhiPei",
"osanseviero",
"victor",
"samusenps",
"MaziyarPanahi",
"bczhou"
],
"count": 6
}
] | 2024-03-11T03:28:32.000Z | 2024-03-11T03:29:44.095Z | [] | /posts/QizhiPei/265811604112635 | 308 | 0 |
818418428056695 | [
{
"type": "text",
"value": "🚀🖼️🌟 New Research Alert - CVPR 2024! 🌟🖼️🚀",
"raw": "🚀🖼️🌟 New Research Alert - CVPR 2024! 🌟🖼️🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: CAMixerSR: Only Details Need More \"Attention\" 🌟🚀",
"raw": "📄 Title: CAMixerSR: Only Details Need More \"Attention\" 🌟🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: CAMixerSR is a new approach integrating content-aware accelerating framework and token mixer design, to pursue more efficient SR inference via assigning convolution for simple regions but window-attention for complex textures. It exhibits excellent generality and attains competitive results among state-of-the-art models with better complexity-performance trade-offs on large-image SR, lightweight SR, and omnidirectional-image SR.",
"raw": "📝 Description: CAMixerSR is a new approach integrating content-aware accelerating framework and token mixer design, to pursue more efficient SR inference via assigning convolution for simple regions but window-attention for complex textures. It exhibits excellent generality and attains competitive results among state-of-the-art models with better complexity-performance trade-offs on large-image SR, lightweight SR, and omnidirectional-image SR.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: Yan Wang, Shijie Zhao, Yi Liu, Junlin Li, and Li Zhang",
"raw": "👥 Authors: Yan Wang, Shijie Zhao, Yi Liu, Junlin Li, and Li Zhang",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸",
"raw": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Paper: ",
"raw": "🔗 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2402.19289",
"href": null,
"resource": {
"type": "paper",
"id": "2402.19289",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2402.19289",
"code": null,
"user": null,
"label": "CAMixerSR: Only Details Need More \"Attention\" (2402.19289)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Repository: ",
"raw": "🔗 Repository: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/icandle/CAMixerSR",
"href": "https://github.com/icandle/CAMixerSR",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"href": null,
"resource": {
"type": "space",
"id": "DmitryRyumin/NewEraAI-Papers",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " curated by ",
"raw": " curated by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DmitryRyumin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DmitryRyumin",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Added to the Image Enhancement Collection: ",
"raw": "🚀 Added to the Image Enhancement Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/DmitryRyumin/image-enhancement-65ee1cd2fe1c0c877ae55d28",
"href": null,
"resource": {
"type": "collection",
"id": "DmitryRyumin/image-enhancement-65ee1cd2fe1c0c877ae55d28",
"discussionNum": null
},
"url": "https://huggingface.co/collections/DmitryRyumin/image-enhancement-65ee1cd2fe1c0c877ae55d28",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #CAMixerSR #SuperResolution #WindowAttention #ImageEnhancement #CVPR2024 #DeepLearning #Innovation",
"raw": "🔍 Keywords: #CAMixerSR #SuperResolution #WindowAttention #ImageEnhancement #CVPR2024 #DeepLearning #Innovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀🖼️🌟 New Research Alert - CVPR 2024! 🌟🖼️🚀
📄 Title: CAMixerSR: Only Details Need More "Attention" 🌟🚀
📝 Description: CAMixerSR is a new approach integrating content-aware accelerating framework and token mixer design, to pursue more efficient SR inference via assigning convolution for simple regions but window-attention for complex textures. It exhibits excellent generality and attains competitive results among state-of-the-art models with better complexity-performance trade-offs on large-image SR, lightweight SR, and omnidirectional-image SR.
👥 Authors: Yan Wang, Shijie Zhao, Yi Liu, Junlin Li, and Li Zhang
📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸
🔗 Paper: https://huggingface.co/papers/2402.19289
🔗 Repository: https://github.com/icandle/CAMixerSR
📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin
🚀 Added to the Image Enhancement Collection: https://huggingface.co/collections/DmitryRyumin/image-enhancement-65ee1cd2fe1c0c877ae55d28
🔍 Keywords: #CAMixerSR #SuperResolution #WindowAttention #ImageEnhancement #CVPR2024 #DeepLearning #Innovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/TQ7fnhu2Roa7DGVWQ7BfD.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/48Oj7YP0TXVLr3HDbFnJY.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/asljGeMQucHSDqJ5J08uz.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/Pno9Jzjqu4tuh8CfxRe_e.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377
}
] | [
{
"reaction": "👍",
"users": [
"DmitryRyumin",
"samusenps",
"osanseviero",
"Jakaline",
"victor",
"Dimo87"
],
"count": 6
},
{
"reaction": "❤️",
"users": [
"Warung",
"osanseviero",
"tevykuch"
],
"count": 3
}
] | 2024-03-10T20:51:11.000Z | 2024-03-11T21:07:21.603Z | [] | /posts/DmitryRyumin/818418428056695 | 98 | 0 |
282259361762056 | [
{
"type": "text",
"value": "Quantize 7B paramater models in 60 seconds using Half Quadratic Quantization (HQQ).",
"raw": "Quantize 7B paramater models in 60 seconds using Half Quadratic Quantization (HQQ).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This game-changing technique allows for rapid quantization of models like Llama-2-70B in under 5 minutes, outperforming traditional methods by 50x in speed and offering high-quality compression without calibration data.",
"raw": "This game-changing technique allows for rapid quantization of models like Llama-2-70B in under 5 minutes, outperforming traditional methods by 50x in speed and offering high-quality compression without calibration data.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Mobius Labs innovative approach not only significantly reduces memory requirements but also enables the use of large models on consumer-grade GPUs, paving the way for more accessible and efficient machine learning research.",
"raw": "Mobius Labs innovative approach not only significantly reduces memory requirements but also enables the use of large models on consumer-grade GPUs, paving the way for more accessible and efficient machine learning research.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Mobius Labs' method utilizes a robust optimization formulation to determine the optimal quantization parameters, specifically targeting the minimization of errors between original and dequantized weights. This involves employing a loss function that promotes sparsity and utilizes a non-convex lp<1-norm, making the problem challenging yet solvable through a Half-Quadratic solver. ",
"raw": "Mobius Labs' method utilizes a robust optimization formulation to determine the optimal quantization parameters, specifically targeting the minimization of errors between original and dequantized weights. This involves employing a loss function that promotes sparsity and utilizes a non-convex lp<1-norm, making the problem challenging yet solvable through a Half-Quadratic solver. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This solver simplifies the problem by introducing an extra variable and dividing the optimization into manageable sub-problems. Their implementation cleverly fixes the scale parameter to simplify calculations and focuses on optimizing the zero-point, utilizing closed-form solutions for each sub-problem to bypass the need for gradient calculations.",
"raw": "This solver simplifies the problem by introducing an extra variable and dividing the optimization into manageable sub-problems. Their implementation cleverly fixes the scale parameter to simplify calculations and focuses on optimizing the zero-point, utilizing closed-form solutions for each sub-problem to bypass the need for gradient calculations.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check out the colab demo where you are able to quantize models (text generation and multimodal) for use with vLLM or Timm backend as well as transformers!",
"raw": "Check out the colab demo where you are able to quantize models (text generation and multimodal) for use with vLLM or Timm backend as well as transformers!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "AutoHQQ: 👉 ",
"raw": "AutoHQQ: 👉 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://colab.research.google.com/drive/1cG_5R_u9q53Uond7F0JEdliwvoeeaXVN?usp=sharing",
"href": "https://colab.research.google.com/drive/1cG_5R_u9q53Uond7F0JEdliwvoeeaXVN?usp=sharing",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Code: ",
"raw": "Code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/mobiusml/hqq",
"href": "https://github.com/mobiusml/hqq",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "HQQ Blog post: ",
"raw": "HQQ Blog post: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://mobiusml.github.io/hqq_blog/",
"href": "https://mobiusml.github.io/hqq_blog/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Edit: Here is an example of how powerful HQQ can be: ",
"raw": "Edit: Here is an example of how powerful HQQ can be: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/macadeliccc/Nous-Hermes-2-Mixtral-8x7B-DPO-HQQ",
"href": null,
"resource": {
"type": "model",
"id": "macadeliccc/Nous-Hermes-2-Mixtral-8x7B-DPO-HQQ",
"discussionNum": null
},
"url": "https://huggingface.co/macadeliccc/Nous-Hermes-2-Mixtral-8x7B-DPO-HQQ",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Citations:",
"raw": "Citations:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@misc",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "misc",
"label": null,
"lang": null
},
{
"type": "text",
"value": "{badri2023hqq,",
"raw": "{badri2023hqq,",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "\ttitle = {Half-Quadratic Quantization of Large Machine Learning Models},",
"raw": "\ttitle = {Half-Quadratic Quantization of Large Machine Learning Models},",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "\turl = {https://mobiusml.github.io/hqq_blog/},",
"raw": "\turl = {https://mobiusml.github.io/hqq_blog/},",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "\tauthor = {Hicham Badri and Appu Shaji},",
"raw": "\tauthor = {Hicham Badri and Appu Shaji},",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "\tmonth = {November},",
"raw": "\tmonth = {November},",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "\tyear = {2023}",
"raw": "\tyear = {2023}",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "}",
"raw": "}",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Quantize 7B paramater models in 60 seconds using Half Quadratic Quantization (HQQ).
This game-changing technique allows for rapid quantization of models like Llama-2-70B in under 5 minutes, outperforming traditional methods by 50x in speed and offering high-quality compression without calibration data.
Mobius Labs innovative approach not only significantly reduces memory requirements but also enables the use of large models on consumer-grade GPUs, paving the way for more accessible and efficient machine learning research.
Mobius Labs' method utilizes a robust optimization formulation to determine the optimal quantization parameters, specifically targeting the minimization of errors between original and dequantized weights. This involves employing a loss function that promotes sparsity and utilizes a non-convex lp<1-norm, making the problem challenging yet solvable through a Half-Quadratic solver.
This solver simplifies the problem by introducing an extra variable and dividing the optimization into manageable sub-problems. Their implementation cleverly fixes the scale parameter to simplify calculations and focuses on optimizing the zero-point, utilizing closed-form solutions for each sub-problem to bypass the need for gradient calculations.
Check out the colab demo where you are able to quantize models (text generation and multimodal) for use with vLLM or Timm backend as well as transformers!
AutoHQQ: 👉 https://colab.research.google.com/drive/1cG_5R_u9q53Uond7F0JEdliwvoeeaXVN?usp=sharing
Code: https://github.com/mobiusml/hqq
HQQ Blog post: https://mobiusml.github.io/hqq_blog/
Edit: Here is an example of how powerful HQQ can be: https://huggingface.co/macadeliccc/Nous-Hermes-2-Mixtral-8x7B-DPO-HQQ
Citations:
@misc{badri2023hqq,
title = {Half-Quadratic Quantization of Large Machine Learning Models},
url = {https://mobiusml.github.io/hqq_blog/},
author = {Hicham Badri and Appu Shaji},
month = {November},
year = {2023}
}
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6455cc8d679315e4ef16fbec/M6Cfifn05BUzkCFd2QDIT.png",
"fullname": "Tim Dolan",
"name": "macadeliccc",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 152,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6455cc8d679315e4ef16fbec/Nlc7ZrY8z3LivdUiMbth_.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6455cc8d679315e4ef16fbec/IaFbTydo4H__M-kUIMFzs.png"
}
] | [
{
"avatarUrl": "/avatars/5de9756e825ada29842b31ddb928cc1e.svg",
"fullname": "MISC",
"name": "Misc",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
}
] | [
{
"reaction": "❤️",
"users": [
"pabloce",
"samusenps",
"ajibawa-2023",
"damerajee",
"afrideva",
"finiteautomata",
"mobicham",
"appoose",
"seyf1elislam",
"Zyn123",
"dev7halo",
"hiepxanh",
"Lewdiculous",
"MH0386",
"avinash02",
"mathiasn1",
"FantasiaFoundry"
],
"count": 17
},
{
"reaction": "👍",
"users": [
"Warung",
"appoose",
"boqiangliang",
"hiepxanh",
"Lewdiculous",
"cnmoro",
"MH0386"
],
"count": 7
},
{
"reaction": "🧠",
"users": [
"Lewdiculous"
],
"count": 1
}
] | 2024-03-10T18:35:00.000Z | 2024-03-11T16:24:10.995Z | [] | /posts/macadeliccc/282259361762056 | 1,450 | 0 |
345421395614045 | [
{
"type": "text",
"value": "Over the past week, I've been putting Claude through its paces, focusing primarily on productivity tasks (you know, the good old BAU – Business As Usual). ",
"raw": "Over the past week, I've been putting Claude through its paces, focusing primarily on productivity tasks (you know, the good old BAU – Business As Usual). ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "1. Python/Torch/Transformers/AI/ML",
"raw": "1. Python/Torch/Transformers/AI/ML",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Right off the bat, I threw some complex AI/ML tasks at Claude, and I must say, it handled them with finesse. It even caught a few things that GPT missed! However, let's not get too carried away – we're not quite at the auto-code level just yet.",
"raw": "Right off the bat, I threw some complex AI/ML tasks at Claude, and I must say, it handled them with finesse. It even caught a few things that GPT missed! However, let's not get too carried away – we're not quite at the auto-code level just yet.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "2. Brainstorming",
"raw": "2. Brainstorming",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This is where Claude falls a bit short. It seems to be more grounded than its competitors, which might not be ideal for generating novel ideas. If you're looking for a brainstorming partner, you might want to look elsewhere.",
"raw": "This is where Claude falls a bit short. It seems to be more grounded than its competitors, which might not be ideal for generating novel ideas. If you're looking for a brainstorming partner, you might want to look elsewhere.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "3. Attention",
"raw": "3. Attention",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Despite the claims of super-large attention in the paper, Claude's \"forgetting\" mechanism seems to be more pronounced. It tends to miss entire chunks of information rather than just specific details like GPT does.",
"raw": "Despite the claims of super-large attention in the paper, Claude's \"forgetting\" mechanism seems to be more pronounced. It tends to miss entire chunks of information rather than just specific details like GPT does.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "4. Following / Tasks",
"raw": "4. Following / Tasks",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I hit a roadblock when Claude couldn't generate a LaTeX document. It's not the best at following complex, multi-step tasks.",
"raw": "I hit a roadblock when Claude couldn't generate a LaTeX document. It's not the best at following complex, multi-step tasks.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "5. Hallucinations",
"raw": "5. Hallucinations",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Oh boy, does Claude hallucinate! And when it does, it's on a whole new level of nonsense. The hallucinations seem to align with its grounded nature, making them even more convincing within the context of the prompt.",
"raw": "Oh boy, does Claude hallucinate! And when it does, it's on a whole new level of nonsense. The hallucinations seem to align with its grounded nature, making them even more convincing within the context of the prompt.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "6. Sycophancy",
"raw": "6. Sycophancy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Claude is quite the people-pleaser. I've found that using an adversarial brainstorming approach is more beneficial and time-efficient, as it forces me to highlight Claude's mistakes rather than letting it focus on being a sweet, pleasant minion.",
"raw": "Claude is quite the people-pleaser. I've found that using an adversarial brainstorming approach is more beneficial and time-efficient, as it forces me to highlight Claude's mistakes rather than letting it focus on being a sweet, pleasant minion.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "7. Interface / UI",
"raw": "7. Interface / UI",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "There's definitely room for improvement here. Basic features like stepping back on a prompt and stopping generation with the ESC key are missing. These are essential for extracting and composing content effectively.",
"raw": "There's definitely room for improvement here. Basic features like stepping back on a prompt and stopping generation with the ESC key are missing. These are essential for extracting and composing content effectively.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Despite these limitations, I firmly believe that Claude is currently the #1 ",
"raw": "Despite these limitations, I firmly believe that Claude is currently the #1 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Over the past week, I've been putting Claude through its paces, focusing primarily on productivity tasks (you know, the good old BAU – Business As Usual).
1. Python/Torch/Transformers/AI/ML
Right off the bat, I threw some complex AI/ML tasks at Claude, and I must say, it handled them with finesse. It even caught a few things that GPT missed! However, let's not get too carried away – we're not quite at the auto-code level just yet.
2. Brainstorming
This is where Claude falls a bit short. It seems to be more grounded than its competitors, which might not be ideal for generating novel ideas. If you're looking for a brainstorming partner, you might want to look elsewhere.
3. Attention
Despite the claims of super-large attention in the paper, Claude's "forgetting" mechanism seems to be more pronounced. It tends to miss entire chunks of information rather than just specific details like GPT does.
4. Following / Tasks
I hit a roadblock when Claude couldn't generate a LaTeX document. It's not the best at following complex, multi-step tasks.
5. Hallucinations
Oh boy, does Claude hallucinate! And when it does, it's on a whole new level of nonsense. The hallucinations seem to align with its grounded nature, making them even more convincing within the context of the prompt.
6. Sycophancy
Claude is quite the people-pleaser. I've found that using an adversarial brainstorming approach is more beneficial and time-efficient, as it forces me to highlight Claude's mistakes rather than letting it focus on being a sweet, pleasant minion.
7. Interface / UI
There's definitely room for improvement here. Basic features like stepping back on a prompt and stopping generation with the ESC key are missing. These are essential for extracting and composing content effectively.
Despite these limitations, I firmly believe that Claude is currently the #1 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6401c8c9f98fbc64bcd7dca1/MOSgc_mPbfUZ-354osy1v.png",
"fullname": "FBL",
"name": "fblgit",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 228,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"macadeliccc",
"osanseviero",
"victor",
"delano",
"Nymbo"
],
"count": 6
}
] | 2024-03-10T14:03:09.000Z | 2024-07-20T22:11:14.428Z | [
{
"avatarUrl": "/avatars/317d403f6da087c2b496426571763098.svg",
"fullname": "Иванов Константин",
"name": "jukkkk3n",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/UsYZUGNvR3ArHrDRzKKMU.jpeg",
"fullname": "nanaki seto",
"name": "nanakiseto",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6401c8c9f98fbc64bcd7dca1/MOSgc_mPbfUZ-354osy1v.png",
"fullname": "FBL",
"name": "fblgit",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 228,
"isFollowing": false
}
] | /posts/fblgit/345421395614045 | 1,705 | 4 |
891388422786050 | [
{
"type": "text",
"value": "LAST CHANCE TO TRY 🌟STARCODER2",
"raw": "LAST CHANCE TO TRY 🌟STARCODER2",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "After today it's gone !",
"raw": "After today it's gone !",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "actually not - just joking ! it's <3 open source !",
"raw": "actually not - just joking ! it's <3 open source !",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " just trying to get folks' attention to my featured \"Spaces of the Week\" :",
"raw": " just trying to get folks' attention to my featured \"Spaces of the Week\" :",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Tonic/starcoder2",
"href": null,
"resource": {
"type": "space",
"id": "Tonic/starcoder2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Tonic/starcoder2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "drop a like for your boy and join us next week for making fine tunes !",
"raw": "drop a like for your boy and join us next week for making fine tunes !",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | LAST CHANCE TO TRY 🌟STARCODER2
After today it's gone !
actually not - just joking ! it's <3 open source !
just trying to get folks' attention to my featured "Spaces of the Week" :
https://huggingface.co/spaces/Tonic/starcoder2
drop a like for your boy and join us next week for making fine tunes ! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg",
"fullname": "Joseph [open/acc] Pollack",
"name": "Tonic",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 313,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"macadeliccc",
"flyineye",
"ForsakenRanger"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"macadeliccc"
],
"count": 1
}
] | 2024-03-10T13:43:48.000Z | 2024-03-10T13:43:48.920Z | [] | /posts/Tonic/891388422786050 | 465 | 0 |
837823795372445 | [
{
"type": "text",
"value": "Multi-Instance Generation Controller: Enjoy complete control over position generation, attribute determination, and count! ",
"raw": "Multi-Instance Generation Controller: Enjoy complete control over position generation, attribute determination, and count! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "code link: ",
"raw": "code link: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/limuloo/MIGC",
"href": "https://github.com/limuloo/MIGC",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "project page: ",
"raw": "project page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://migcproject.github.io/",
"href": "https://migcproject.github.io/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "MIGC decouples multi-instance generation into individual single-instance generation subtasks within the cross-attention layer of Stable Diffusion. ",
"raw": "MIGC decouples multi-instance generation into individual single-instance generation subtasks within the cross-attention layer of Stable Diffusion. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Welcome to follow our project and use the code to create anything you imagine!",
"raw": "Welcome to follow our project and use the code to create anything you imagine!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Please let us know if you have any suggestions!",
"raw": "Please let us know if you have any suggestions!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Multi-Instance Generation Controller: Enjoy complete control over position generation, attribute determination, and count!
code link: https://github.com/limuloo/MIGC
project page: https://migcproject.github.io/
MIGC decouples multi-instance generation into individual single-instance generation subtasks within the cross-attention layer of Stable Diffusion.
Welcome to follow our project and use the code to create anything you imagine!
Please let us know if you have any suggestions!
| {
"avatarUrl": "/avatars/9e24b3528774a4d7635c800b29a3588e.svg",
"fullname": "Fan",
"name": "Flowerfan",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 11,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/652d3da8072563249e8c7824/7BKpM0hf0XUssZeWFug_m.mp4"
}
] | [] | [
{
"reaction": "👍",
"users": [
"Flowerfan",
"limuloo1999",
"little-air-dust",
"LeyRio",
"xiaodi",
"sanaka87",
"wenhaozzu",
"yqqCheergo",
"Whalesong",
"Csplk",
"mingzs2",
"samusenps",
"taufiqdp",
"Jouryjc",
"osanseviero",
"Warung",
"George-Blaze",
"danniel2023",
"linoyts",
"Jakaline",
"ariarcus",
"erickdp",
"radames"
],
"count": 23
},
{
"reaction": "❤️",
"users": [
"limuloo1999",
"xiaodi",
"sanaka87",
"Shinku",
"Whalesong",
"mingzs2",
"samusenps",
"vvvvJ",
"osanseviero",
"danniel2023",
"linoyts",
"shashankbodapati",
"radames",
"jeffboudier"
],
"count": 14
},
{
"reaction": "🤗",
"users": [
"limuloo1999",
"xiaodi",
"sanaka87",
"Whalesong",
"mingzs2",
"radames"
],
"count": 6
},
{
"reaction": "🤝",
"users": [
"limuloo1999",
"xiaodi",
"sanaka87",
"Whalesong",
"mingzs2"
],
"count": 5
},
{
"reaction": "😔",
"users": [
"mingzs2"
],
"count": 1
},
{
"reaction": "🤯",
"users": [
"mingzs2"
],
"count": 1
}
] | 2024-03-10T05:46:02.000Z | 2024-03-10T14:02:32.027Z | [
{
"avatarUrl": "/avatars/06ee5dff564cc4e71fed5e8dc0a7c1d4.svg",
"fullname": "JingHuang",
"name": "little-air-dust",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/beecd135bb940fdc02406f9063b3fa67.svg",
"fullname": "Dewei Zhou",
"name": "limuloo1999",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "/avatars/a52a27802975fce32acac72588cc880b.svg",
"fullname": "LiNo3dy",
"name": "xiaodi",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/e8e4953f075c410ea0e6b06f7145913c.svg",
"fullname": "谢集",
"name": "sanaka87",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/6f946d3b5fb07936d2fd6d4fa7598b85.svg",
"fullname": "zwh",
"name": "wenhaozzu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/d95e30686bcaaa2ddd4b337c1afe0fec.svg",
"fullname": "Wenjie Wei",
"name": "vvvvJ",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/Flowerfan/837823795372445 | 98 | 6 |
720093499364569 | [
{
"type": "text",
"value": "Updating PaperQA Gradio app and Hugging Face Space.",
"raw": "Updating PaperQA Gradio app and Hugging Face Space.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ": Link ➡️ ",
"raw": ": Link ➡️ ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/chansung/paper_qa",
"href": null,
"resource": {
"type": "space",
"id": "chansung/paper_qa",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/chansung/paper_qa",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ": Standalone repo ➡️ ",
"raw": ": Standalone repo ➡️ ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/deep-diver/paperqa-ui",
"href": "https://github.com/deep-diver/paperqa-ui",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The final goal is to let ppl have their own paper archive. At the end, You will be able to easily *clone* on local or Hugging Face Space with Google's Gemini API Key (which is free), Hugging Face Access Token. You can just drop arXiv IDs at the bottom, then all the auto analyze papers are automatically archived on Hugging Face Dataset repo.",
"raw": "The final goal is to let ppl have their own paper archive. At the end, You will be able to easily *clone* on local or Hugging Face Space with Google's Gemini API Key (which is free), Hugging Face Access Token. You can just drop arXiv IDs at the bottom, then all the auto analyze papers are automatically archived on Hugging Face Dataset repo.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Here are few updates included, and dig in the source code if you want similar features for your use cases!",
"raw": "Here are few updates included, and dig in the source code if you want similar features for your use cases!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🖥️ making complex UI + fully responsive",
"raw": "🖥️ making complex UI + fully responsive",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "+ making UI as quickly as possible (avoid server-client when possible)",
"raw": "+ making UI as quickly as possible (avoid server-client when possible)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💬 Permanent Chat history management with in-browser local storage",
"raw": "💬 Permanent Chat history management with in-browser local storage",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "+ Chat history management *per* paper",
"raw": "+ Chat history management *per* paper",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "+ Chat history management in lazy mode (too many paper, impossible to create chat history for every single paper beforehand, hence)",
"raw": "+ Chat history management in lazy mode (too many paper, impossible to create chat history for every single paper beforehand, hence)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Current plan is to support Gemini and any open source models on Hugging Face PRO account, but will expand it to GPT4 soon.",
"raw": "Current plan is to support Gemini and any open source models on Hugging Face PRO account, but will expand it to GPT4 soon.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Any suggestion on this project is welcome! possibly,",
"raw": "Any suggestion on this project is welcome! possibly,",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- hooking up RAG system (open models' context length is small)",
"raw": "- hooking up RAG system (open models' context length is small)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- hooking up Internet search system",
"raw": "- hooking up Internet search system",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- image/figure analysis",
"raw": "- image/figure analysis",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "....",
"raw": "....",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Updating PaperQA Gradio app and Hugging Face Space.
: Link ➡️ https://huggingface.co/spaces/chansung/paper_qa
: Standalone repo ➡️ https://github.com/deep-diver/paperqa-ui
The final goal is to let ppl have their own paper archive. At the end, You will be able to easily *clone* on local or Hugging Face Space with Google's Gemini API Key (which is free), Hugging Face Access Token. You can just drop arXiv IDs at the bottom, then all the auto analyze papers are automatically archived on Hugging Face Dataset repo.
Here are few updates included, and dig in the source code if you want similar features for your use cases!
🖥️ making complex UI + fully responsive
+ making UI as quickly as possible (avoid server-client when possible)
💬 Permanent Chat history management with in-browser local storage
+ Chat history management *per* paper
+ Chat history management in lazy mode (too many paper, impossible to create chat history for every single paper beforehand, hence)
Current plan is to support Gemini and any open source models on Hugging Face PRO account, but will expand it to GPT4 soon.
Any suggestion on this project is welcome! possibly,
- hooking up RAG system (open models' context length is small)
- hooking up Internet search system
- image/figure analysis
.... | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg",
"fullname": "chansung park",
"name": "chansung",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2695,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60d3b57ad7b174177faabd6e/NdYVwFoJmj8ysW-gmRQo8.mp4"
}
] | [] | [
{
"reaction": "👍",
"users": [
"George-Blaze",
"chansung",
"Tonic",
"osanseviero"
],
"count": 4
},
{
"reaction": "❤️",
"users": [
"DmitryRyumin",
"samusenps",
"osanseviero"
],
"count": 3
}
] | 2024-03-10T02:28:27.000Z | 2024-03-10T03:49:01.643Z | [] | /posts/chansung/720093499364569 | 196 | 0 |
130375370855805 | [
{
"type": "text",
"value": "Keep stacking cool stuff and getting better results! After I changed the standard vision encoder to SigLIP, NLLB-CLIP got a 10% average performance improvement. And now, I added matryoshka layers (",
"raw": "Keep stacking cool stuff and getting better results! After I changed the standard vision encoder to SigLIP, NLLB-CLIP got a 10% average performance improvement. And now, I added matryoshka layers (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2205.13147",
"href": "https://arxiv.org/abs/2205.13147",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ") to enable smaller embeddings and got another 6% performance boost! Plus, thanks to MRL, 4.5x smaller embeddings retain 90%+ quality.",
"raw": ") to enable smaller embeddings and got another 6% performance boost! Plus, thanks to MRL, 4.5x smaller embeddings retain 90%+ quality.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The large model is finally SoTA for both image and text multilingual retrieval!",
"raw": "The large model is finally SoTA for both image and text multilingual retrieval!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The models are available on the hub:",
"raw": "The models are available on the hub:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/visheratin/nllb-siglip-mrl-base",
"href": null,
"resource": {
"type": "model",
"id": "visheratin/nllb-siglip-mrl-base",
"discussionNum": null
},
"url": "https://huggingface.co/visheratin/nllb-siglip-mrl-base",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/visheratin/nllb-siglip-mrl-large",
"href": null,
"resource": {
"type": "model",
"id": "visheratin/nllb-siglip-mrl-large",
"discussionNum": null
},
"url": "https://huggingface.co/visheratin/nllb-siglip-mrl-large",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Keep stacking cool stuff and getting better results! After I changed the standard vision encoder to SigLIP, NLLB-CLIP got a 10% average performance improvement. And now, I added matryoshka layers (https://arxiv.org/abs/2205.13147) to enable smaller embeddings and got another 6% performance boost! Plus, thanks to MRL, 4.5x smaller embeddings retain 90%+ quality.
The large model is finally SoTA for both image and text multilingual retrieval!
The models are available on the hub:
- https://huggingface.co/visheratin/nllb-siglip-mrl-base
- https://huggingface.co/visheratin/nllb-siglip-mrl-large | {
"avatarUrl": "/avatars/b892a3d50b2f8ead0a5f7108564e45d0.svg",
"fullname": "Alexander Visheratin",
"name": "visheratin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 55,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/609ede05121df5de54007033/DOefnxOxJLLwflAjpHXel.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"afrideva",
"zhanglu"
],
"count": 4
},
{
"reaction": "🤯",
"users": [
"Tonic",
"osanseviero"
],
"count": 2
},
{
"reaction": "🤗",
"users": [
"bmorphism",
"dark-pen"
],
"count": 2
},
{
"reaction": "👍",
"users": [
"dashfunnydashdash"
],
"count": 1
}
] | 2024-03-10T01:57:38.000Z | 2024-03-12T18:42:05.839Z | [
{
"avatarUrl": "/avatars/afbc48df2e8c47c35be48168113d83c0.svg",
"fullname": "s",
"name": "Tom-Neverwinter",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "/avatars/b892a3d50b2f8ead0a5f7108564e45d0.svg",
"fullname": "Alexander Visheratin",
"name": "visheratin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 55,
"isFollowing": false
}
] | /posts/visheratin/130375370855805 | 177 | 2 |
868821264219482 | [
{
"type": "text",
"value": "Retrieval-Augmented Generation (RAG)",
"raw": "Retrieval-Augmented Generation (RAG)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Redeemer of the \"hallucination problem\"",
"raw": "Redeemer of the \"hallucination problem\"",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It is fair enough to argue that \"hallucinations\" in LLMs are just mere reflections of what we humans occasionally do - well it gets worse as we get older, but these models are brain inspired, thus such behaviors are likely inherently unavoidable. After all, we are just dreamers trying make sense of this life.",
"raw": "It is fair enough to argue that \"hallucinations\" in LLMs are just mere reflections of what we humans occasionally do - well it gets worse as we get older, but these models are brain inspired, thus such behaviors are likely inherently unavoidable. After all, we are just dreamers trying make sense of this life.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The best we can do is minimize and control it - but humanly how? By first feeding on relevant facts and then developing a habit that allows us to easily access those facts when needed. This is what RAG is all about - it's just a control mechanism that keeps the LLM aligned with reality and fact.",
"raw": "The best we can do is minimize and control it - but humanly how? By first feeding on relevant facts and then developing a habit that allows us to easily access those facts when needed. This is what RAG is all about - it's just a control mechanism that keeps the LLM aligned with reality and fact.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "But How Does RAG Work?",
"raw": "But How Does RAG Work?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Well, to some extent it is domain-specific but the overall workflow boils down to the following:",
"raw": "Well, to some extent it is domain-specific but the overall workflow boils down to the following:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "1. It makes use of a retrieval mechanism that hunts for facts relevant to a query - this involves an end-to-end backpropagation that leverages a retriever (Query Encoder + Document Index or Source of Truth) with a pre-trained generative model.",
"raw": "1. It makes use of a retrieval mechanism that hunts for facts relevant to a query - this involves an end-to-end backpropagation that leverages a retriever (Query Encoder + Document Index or Source of Truth) with a pre-trained generative model.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "2. The generative model then uses the facts retrieved, performs some verification to give a more accurate response.",
"raw": "2. The generative model then uses the facts retrieved, performs some verification to give a more accurate response.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "To summarize, the RAG architecture houses a pre-existing knowledge source model (termed parametric memory), which then utilizes a Source-of-Truth model or vector indexed data (termed non-parametric memory) that is accessed by a pre-trained neural retriever, in order to produce more informed, contextually appropriate and factually correct responses.",
"raw": "To summarize, the RAG architecture houses a pre-existing knowledge source model (termed parametric memory), which then utilizes a Source-of-Truth model or vector indexed data (termed non-parametric memory) that is accessed by a pre-trained neural retriever, in order to produce more informed, contextually appropriate and factually correct responses.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Sort of a \"Genius Engine\" if you might say. If only we humans could harness such, AGI would be much much sooner lol.",
"raw": "Sort of a \"Genius Engine\" if you might say. If only we humans could harness such, AGI would be much much sooner lol.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "In the meantime, I have been Jaward Sesay (Chinese name 苏杰 Sujie) - a young Sierra Leonean, aspiring AI Researcher. I like to read, share and try implementing AI research papers. Also like dunking on big tech while rooting for open-source. My mentor ",
"raw": "In the meantime, I have been Jaward Sesay (Chinese name 苏杰 Sujie) - a young Sierra Leonean, aspiring AI Researcher. I like to read, share and try implementing AI research papers. Also like dunking on big tech while rooting for open-source. My mentor ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@karpathy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "karpathy",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", I dream of him following me back on X lol. Thanks.",
"raw": ", I dream of him following me back on X lol. Thanks.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Retrieval-Augmented Generation (RAG)
Redeemer of the "hallucination problem"
It is fair enough to argue that "hallucinations" in LLMs are just mere reflections of what we humans occasionally do - well it gets worse as we get older, but these models are brain inspired, thus such behaviors are likely inherently unavoidable. After all, we are just dreamers trying make sense of this life.
The best we can do is minimize and control it - but humanly how? By first feeding on relevant facts and then developing a habit that allows us to easily access those facts when needed. This is what RAG is all about - it's just a control mechanism that keeps the LLM aligned with reality and fact.
But How Does RAG Work?
Well, to some extent it is domain-specific but the overall workflow boils down to the following:
1. It makes use of a retrieval mechanism that hunts for facts relevant to a query - this involves an end-to-end backpropagation that leverages a retriever (Query Encoder + Document Index or Source of Truth) with a pre-trained generative model.
2. The generative model then uses the facts retrieved, performs some verification to give a more accurate response.
To summarize, the RAG architecture houses a pre-existing knowledge source model (termed parametric memory), which then utilizes a Source-of-Truth model or vector indexed data (termed non-parametric memory) that is accessed by a pre-trained neural retriever, in order to produce more informed, contextually appropriate and factually correct responses.
Sort of a "Genius Engine" if you might say. If only we humans could harness such, AGI would be much much sooner lol.
In the meantime, I have been Jaward Sesay (Chinese name 苏杰 Sujie) - a young Sierra Leonean, aspiring AI Researcher. I like to read, share and try implementing AI research papers. Also like dunking on big tech while rooting for open-source. My mentor @karpathy, I dream of him following me back on X lol. Thanks.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/rX1ed4H6oQheZ6BqMbOBF.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/jDdSjqNcZPw1zSfdlI8Bb.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/9cF7NBqSVxUfO46eBnFUK.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/tx9mc3wjSWMaujzMBBfLZ.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1660434061546-62f83661fe21cc4875221c0f.jpeg",
"fullname": "Andrej K",
"name": "karpathy",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 476
}
] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"osanseviero",
"paralym",
"Jafta",
"rohitdavas"
],
"count": 5
}
] | 2024-03-10T01:48:00.000Z | 2024-03-24T08:15:36.872Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/WeO0AziKGQ9OLFjWmXfee.jpeg",
"fullname": "Rohit Kumar",
"name": "rohitdavas",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
}
] | /posts/Jaward/868821264219482 | 56 | 2 |
750681599931706 | [
{
"type": "text",
"value": "A recent paper titled \"Finetuned Multimodal Language Models Are High-Quality Image-Text Data Filters\" proposes using fine-tuned Multimodal Language Models (MLMs) as high-quality filters for image-text data. ",
"raw": "A recent paper titled \"Finetuned Multimodal Language Models Are High-Quality Image-Text Data Filters\" proposes using fine-tuned Multimodal Language Models (MLMs) as high-quality filters for image-text data. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Key points:",
"raw": "Key points:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Defines multiple metrics to assess image-text quality from different perspectives like object details, text quality, and semantic understanding.",
"raw": "* Defines multiple metrics to assess image-text quality from different perspectives like object details, text quality, and semantic understanding.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Leverages GPT-4 and GPT-4V to construct high-quality instruction data for fine-tuning open-source MLMs as effective data filters.",
"raw": "* Leverages GPT-4 and GPT-4V to construct high-quality instruction data for fine-tuning open-source MLMs as effective data filters.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Fine-tuned MLM filters generate more precise scores, leading to better filtered data and improved performance of pre-trained models on various downstream tasks.",
"raw": "* Fine-tuned MLM filters generate more precise scores, leading to better filtered data and improved performance of pre-trained models on various downstream tasks.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Congrats to the authors for their work!",
"raw": "Congrats to the authors for their work!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.02677",
"href": null,
"resource": {
"type": "paper",
"id": "2403.02677",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.02677",
"code": null,
"user": null,
"label": "Finetuned Multimodal Language Models Are High-Quality Image-Text Data\n Filters (2403.02677)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Code: ",
"raw": "Code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/Victorwz/MLM_Filter",
"href": "https://github.com/Victorwz/MLM_Filter",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Dataset: ",
"raw": "Dataset: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/weizhiwang/mlm_filter_instructions",
"href": null,
"resource": {
"type": "dataset",
"id": "weizhiwang/mlm_filter_instructions",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/weizhiwang/mlm_filter_instructions",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/weizhiwang/mlm-filter-llava-13b-gpt4v",
"href": null,
"resource": {
"type": "model",
"id": "weizhiwang/mlm-filter-llava-13b-gpt4v",
"discussionNum": null
},
"url": "https://huggingface.co/weizhiwang/mlm-filter-llava-13b-gpt4v",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | A recent paper titled "Finetuned Multimodal Language Models Are High-Quality Image-Text Data Filters" proposes using fine-tuned Multimodal Language Models (MLMs) as high-quality filters for image-text data.
Key points:
* Defines multiple metrics to assess image-text quality from different perspectives like object details, text quality, and semantic understanding.
* Leverages GPT-4 and GPT-4V to construct high-quality instruction data for fine-tuning open-source MLMs as effective data filters.
* Fine-tuned MLM filters generate more precise scores, leading to better filtered data and improved performance of pre-trained models on various downstream tasks.
Congrats to the authors for their work!
Paper: https://huggingface.co/papers/2403.02677
Code: https://github.com/Victorwz/MLM_Filter
Dataset: https://huggingface.co/datasets/weizhiwang/mlm_filter_instructions
Model: https://huggingface.co/weizhiwang/mlm-filter-llava-13b-gpt4v | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg",
"fullname": "Vlad Bogolin",
"name": "vladbogo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 109,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/pzt37kTjpWQ2X1UD3dtGA.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/WyX-2hlaMzYwgD0W2IjrI.png"
}
] | [] | [
{
"reaction": "🤗",
"users": [
"Kukedlc",
"rohitdavas",
"thomwolf"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"samusenps",
"Danielpluto",
"thomwolf"
],
"count": 3
},
{
"reaction": "❤️",
"users": [
"samusenps",
"thomwolf"
],
"count": 2
}
] | 2024-03-09T22:24:15.000Z | 2024-03-09T22:24:15.378Z | [] | /posts/vladbogo/750681599931706 | 59 | 0 |
906785325455792 | [
{
"type": "text",
"value": "Introducing the 🤗 Transformers.js WebGPU Embedding Benchmark! ⚡️",
"raw": "Introducing the 🤗 Transformers.js WebGPU Embedding Benchmark! ⚡️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👉 ",
"raw": "👉 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Xenova/webgpu-embedding-benchmark",
"href": null,
"resource": {
"type": "space",
"id": "Xenova/webgpu-embedding-benchmark",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Xenova/webgpu-embedding-benchmark",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " 👈",
"raw": " 👈",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "On my device, I was able to achieve a 64.04x speedup over WASM! 🤯 How much does WebGPU speed up ML models running locally in your browser? Try it out and share your results! 🚀",
"raw": "On my device, I was able to achieve a 64.04x speedup over WASM! 🤯 How much does WebGPU speed up ML models running locally in your browser? Try it out and share your results! 🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Introducing the 🤗 Transformers.js WebGPU Embedding Benchmark! ⚡️
👉 https://huggingface.co/spaces/Xenova/webgpu-embedding-benchmark 👈
On my device, I was able to achieve a 64.04x speedup over WASM! 🤯 How much does WebGPU speed up ML models running locally in your browser? Try it out and share your results! 🚀 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b253b7ac5ecaae3d1efe0c/hwiQ0uvz3t-L5a-NtBIO6.png",
"fullname": "Joshua",
"name": "Xenova",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 3792,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/61b253b7ac5ecaae3d1efe0c/NLgIMzMx3bbpAv1OwVMNh.mp4"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"samusenps",
"omaryshchenko",
"thomwolf",
"osanseviero",
"Seuriin",
"Tonic",
"ozzyonfire",
"amiratatomic",
"abidlabs",
"hogunkim",
"felixdrp",
"leekung",
"Noomam",
"xnohat",
"do-me",
"mikestaub"
],
"count": 16
},
{
"reaction": "👍",
"users": [
"mvaloatto",
"Tonic",
"kramp",
"abidlabs",
"pogzyb",
"cklam12345"
],
"count": 6
},
{
"reaction": "🤝",
"users": [
"Tonic"
],
"count": 1
},
{
"reaction": "🤯",
"users": [
"Tonic"
],
"count": 1
},
{
"reaction": "🤗",
"users": [
"Tonic"
],
"count": 1
}
] | 2024-03-09T20:23:56.000Z | 2024-07-23T16:41:26.102Z | [
{
"avatarUrl": "/avatars/ec7b676c22a9249c4eab9931c94b0d79.svg",
"fullname": "Hieu Nguyen",
"name": "thegums",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/93da2fb63884b9a938cb65c47d97000c.svg",
"fullname": "hugging",
"name": "fhieni",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648715410492e52144dc4d69/GNOJ29JAmyS3-GvOSVNui.jpeg",
"fullname": "Daniel Demmel",
"name": "daaain",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/Xenova/906785325455792 | 7,635 | 3 |
633758457910104 | [
{
"type": "text",
"value": "Diaries of Open Source. Part 3! OS goes to the moon!",
"raw": "Diaries of Open Source. Part 3! OS goes to the moon!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💻 OpenCodeInterpreter, a family of very powerful code generation models",
"raw": "💻 OpenCodeInterpreter, a family of very powerful code generation models",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Models: ",
"raw": "Models: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/m-a-p/opencodeinterpreter-65d312f6f88da990a64da456",
"href": null,
"resource": {
"type": "collection",
"id": "m-a-p/opencodeinterpreter-65d312f6f88da990a64da456",
"discussionNum": null
},
"url": "https://huggingface.co/collections/m-a-p/opencodeinterpreter-65d312f6f88da990a64da456",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2402.14658",
"href": null,
"resource": {
"type": "paper",
"id": "2402.14658",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2402.14658",
"code": null,
"user": null,
"label": "OpenCodeInterpreter: Integrating Code Generation with Execution and\n Refinement (2402.14658)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo ",
"raw": "Demo ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/m-a-p/OpenCodeInterpreter_demo",
"href": null,
"resource": {
"type": "space",
"id": "m-a-p/OpenCodeInterpreter_demo",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/m-a-p/OpenCodeInterpreter_demo",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔷🔶Zephyr 7B Gemma, Gemma fine-tuned with the Zephyr recipe ",
"raw": "🔷🔶Zephyr 7B Gemma, Gemma fine-tuned with the Zephyr recipe ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1",
"href": null,
"resource": {
"type": "model",
"id": "HuggingFaceH4/zephyr-7b-gemma-v0.1",
"discussionNum": null
},
"url": "https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo: ",
"raw": "Demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/HuggingFaceH4/zephyr-7b-gemma-chat",
"href": null,
"resource": {
"type": "space",
"id": "HuggingFaceH4/zephyr-7b-gemma-chat",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/HuggingFaceH4/zephyr-7b-gemma-chat",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "GH Repo: ",
"raw": "GH Repo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/alignment-handbook",
"href": "https://github.com/huggingface/alignment-handbook",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🪆The MixedBread folks released a 2D Matryoshka text embedding model, which means you can dynamically change the embedding size and layer counts ",
"raw": "🪆The MixedBread folks released a 2D Matryoshka text embedding model, which means you can dynamically change the embedding size and layer counts ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/mixedbread-ai/mxbai-embed-2d-large-v1",
"href": null,
"resource": {
"type": "model",
"id": "mixedbread-ai/mxbai-embed-2d-large-v1",
"discussionNum": null
},
"url": "https://huggingface.co/mixedbread-ai/mxbai-embed-2d-large-v1",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Release blog post: ",
"raw": "Release blog post: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.mixedbread.ai/blog/mxbai-embed-2d-large-v1",
"href": "https://www.mixedbread.ai/blog/mxbai-embed-2d-large-v1",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🐋Microsoft released Orca Math, which includes 200K grade school math problems",
"raw": "🐋Microsoft released Orca Math, which includes 200K grade school math problems",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Dataset: ",
"raw": "Dataset: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/microsoft/orca-math-word-problems-200k",
"href": null,
"resource": {
"type": "dataset",
"id": "microsoft/orca-math-word-problems-200k",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/microsoft/orca-math-word-problems-200k",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🥷IBM silently released Merlinite, a cool model trained on Mixtral-generated synthetic data using a novel LAB method ",
"raw": "🥷IBM silently released Merlinite, a cool model trained on Mixtral-generated synthetic data using a novel LAB method ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/ibm/merlinite-7b",
"href": null,
"resource": {
"type": "model",
"id": "ibm/merlinite-7b",
"discussionNum": null
},
"url": "https://huggingface.co/ibm/merlinite-7b",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌚 Moondream2 - a small vision language model to run on-device!",
"raw": "🌚 Moondream2 - a small vision language model to run on-device!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/vikhyatk/moondream2",
"href": null,
"resource": {
"type": "model",
"id": "vikhyatk/moondream2",
"discussionNum": null
},
"url": "https://huggingface.co/vikhyatk/moondream2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo: ",
"raw": "Demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/vikhyatk/moondream2",
"href": null,
"resource": {
"type": "space",
"id": "vikhyatk/moondream2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/vikhyatk/moondream2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏙️CityDreamer: 3D City Generation",
"raw": "🏙️CityDreamer: 3D City Generation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo: ",
"raw": "Demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/hzxie/city-dreamer",
"href": null,
"resource": {
"type": "space",
"id": "hzxie/city-dreamer",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/hzxie/city-dreamer",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Repo: ",
"raw": "Repo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/hzxie/city-dreamer",
"href": "https://github.com/hzxie/city-dreamer",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model: ",
"raw": "Model: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/hzxie/city-dreamer",
"href": null,
"resource": {
"type": "model",
"id": "hzxie/city-dreamer",
"discussionNum": null
},
"url": "https://huggingface.co/hzxie/city-dreamer",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌏ML in all languages",
"raw": "🌏ML in all languages",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Sailor, a family of South-East Asian languages models ",
"raw": "Sailor, a family of South-East Asian languages models ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/sail/sailor-language-models-65e19a749f978976f1959825",
"href": null,
"resource": {
"type": "collection",
"id": "sail/sailor-language-models-65e19a749f978976f1959825",
"discussionNum": null
},
"url": "https://huggingface.co/collections/sail/sailor-language-models-65e19a749f978976f1959825",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Samvaad dataset, which includes 140k QA pairs in Hindi, Bengali, Marathi, Tamil, Telugu, Oriya, Punjabi, and Gujarati ",
"raw": "Samvaad dataset, which includes 140k QA pairs in Hindi, Bengali, Marathi, Tamil, Telugu, Oriya, Punjabi, and Gujarati ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/GenVRadmin/Samvaad-Mixed-Language-2",
"href": null,
"resource": {
"type": "dataset",
"id": "GenVRadmin/Samvaad-Mixed-Language-2",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/GenVRadmin/Samvaad-Mixed-Language-2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can see the previous part at ",
"raw": "You can see the previous part at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/posts/osanseviero/674644082063278",
"href": "https://huggingface.co/posts/osanseviero/674644082063278",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Diaries of Open Source. Part 3! OS goes to the moon!
💻 OpenCodeInterpreter, a family of very powerful code generation models
Models: https://huggingface.co/collections/m-a-p/opencodeinterpreter-65d312f6f88da990a64da456
Paper: https://huggingface.co/papers/2402.14658
Demo https://huggingface.co/spaces/m-a-p/OpenCodeInterpreter_demo
🔷🔶Zephyr 7B Gemma, Gemma fine-tuned with the Zephyr recipe
Model: https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1
Demo: https://huggingface.co/spaces/HuggingFaceH4/zephyr-7b-gemma-chat
GH Repo: https://github.com/huggingface/alignment-handbook
🪆The MixedBread folks released a 2D Matryoshka text embedding model, which means you can dynamically change the embedding size and layer counts
Model: https://huggingface.co/mixedbread-ai/mxbai-embed-2d-large-v1
Release blog post: https://www.mixedbread.ai/blog/mxbai-embed-2d-large-v1
🐋Microsoft released Orca Math, which includes 200K grade school math problems
Dataset: https://huggingface.co/datasets/microsoft/orca-math-word-problems-200k
🥷IBM silently released Merlinite, a cool model trained on Mixtral-generated synthetic data using a novel LAB method https://huggingface.co/ibm/merlinite-7b
🌚 Moondream2 - a small vision language model to run on-device!
Model: https://huggingface.co/vikhyatk/moondream2
Demo: https://huggingface.co/spaces/vikhyatk/moondream2
🏙️CityDreamer: 3D City Generation
Demo: https://huggingface.co/spaces/hzxie/city-dreamer
Repo: https://github.com/hzxie/city-dreamer
Model: https://huggingface.co/hzxie/city-dreamer
🌏ML in all languages
Sailor, a family of South-East Asian languages models https://huggingface.co/collections/sail/sailor-language-models-65e19a749f978976f1959825
Samvaad dataset, which includes 140k QA pairs in Hindi, Bengali, Marathi, Tamil, Telugu, Oriya, Punjabi, and Gujarati https://huggingface.co/datasets/GenVRadmin/Samvaad-Mixed-Language-2
You can see the previous part at https://huggingface.co/posts/osanseviero/674644082063278 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"DmitryRyumin",
"fffiloni",
"samusenps",
"vladbogo",
"unclecode",
"visheratin",
"ajibawa-2023",
"mvaloatto",
"mikonvergence",
"Makya"
],
"count": 10
}
] | 2024-03-09T19:49:34.000Z | 2024-03-10T02:59:41.535Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 138,
"isFollowing": false
}
] | /posts/osanseviero/633758457910104 | 68 | 1 |
726784982333766 | [
{
"type": "text",
"value": "I have just published my first blog post.",
"raw": "I have just published my first blog post.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "While FlashAttention has been readily integrated into HuggingFace transformers, there are much higher gains to be had (at least theoretically) for finetuning models with examples of variable sequence lengths in a batch.",
"raw": "While FlashAttention has been readily integrated into HuggingFace transformers, there are much higher gains to be had (at least theoretically) for finetuning models with examples of variable sequence lengths in a batch.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "For a deeper dive, please go through my blog at ",
"raw": "For a deeper dive, please go through my blog at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/mayank-mishra/padding-free-transformer",
"href": "https://huggingface.co/blog/mayank-mishra/padding-free-transformer",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I have just published my first blog post.
While FlashAttention has been readily integrated into HuggingFace transformers, there are much higher gains to be had (at least theoretically) for finetuning models with examples of variable sequence lengths in a batch.
For a deeper dive, please go through my blog at https://huggingface.co/blog/mayank-mishra/padding-free-transformer. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62cd5057674cdb524450093d/f67rlrdsKPRTLdXCXoa_X.jpeg",
"fullname": "Mayank Mishra",
"name": "mayank-mishra",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 53,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62cd5057674cdb524450093d/uMWn2x6tgfUKglqciGe3P.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"Kukedlc",
"osanseviero",
"tomaarsen",
"samusenps",
"eramax",
"mayank-mishra",
"julien-c"
],
"count": 7
},
{
"reaction": "🤝",
"users": [
"damerajee",
"julien-c",
"mayank-mishra",
"jeffboudier"
],
"count": 4
}
] | 2024-03-09T16:22:30.000Z | 2024-03-16T19:50:43.571Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a093d63e7d1dda047039fc/QGpVSKuJLwl2EsiffCYML.jpeg",
"fullname": "Olivier Dehaene",
"name": "olivierdehaene",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 78,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62cd5057674cdb524450093d/f67rlrdsKPRTLdXCXoa_X.jpeg",
"fullname": "Mayank Mishra",
"name": "mayank-mishra",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 53,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6487239cca30096ea9f52115/HMte9wjKJgfcxsO-5vb_Q.jpeg",
"fullname": "dame rajee",
"name": "damerajee",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 11,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1641203017724-noauth.png",
"fullname": "Joao Gante",
"name": "joaogante",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 96,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg",
"fullname": "Julien Chaumond",
"name": "julien-c",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1580,
"isFollowing": false
}
] | /posts/mayank-mishra/726784982333766 | 181 | 10 |
289967366439346 | [
{
"type": "text",
"value": "🚀🎭🌟 New Research Alert - ICLR 2024! 🌟🎭 🚀",
"raw": "🚀🎭🌟 New Research Alert - ICLR 2024! 🌟🎭 🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Title: GPAvatar: Generalizable and Precise Head Avatar from Image(s) 🌟🚀",
"raw": "📄 Title: GPAvatar: Generalizable and Precise Head Avatar from Image(s) 🌟🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝 Description: GPAvatar's objective is to faithfully replicate head avatars while providing precise control over expressions and postures.",
"raw": "📝 Description: GPAvatar's objective is to faithfully replicate head avatars while providing precise control over expressions and postures.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👥 Authors: Xuangeng Chu et al.",
"raw": "👥 Authors: Xuangeng Chu et al.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹",
"raw": "📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Paper: ",
"raw": "🔗 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2401.10215",
"href": null,
"resource": {
"type": "paper",
"id": "2401.10215",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2401.10215",
"code": null,
"user": null,
"label": "GPAvatar: Generalizable and Precise Head Avatar from Image(s) (2401.10215)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Github Page: ",
"raw": "🔗 Github Page: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://xg-chu.github.io/project_gpavatar",
"href": "https://xg-chu.github.io/project_gpavatar",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Repository: ",
"raw": "🔗 Repository: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/xg-chu/GPAvatar",
"href": "https://github.com/xg-chu/GPAvatar",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Video: ",
"raw": "🔗 Video: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.youtube.com/watch?v=7A3DMaB6Zk0",
"href": "https://www.youtube.com/watch?v=7A3DMaB6Zk0",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"href": null,
"resource": {
"type": "space",
"id": "DmitryRyumin/NewEraAI-Papers",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " curated by ",
"raw": " curated by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DmitryRyumin",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DmitryRyumin",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Added to the Avatars Collection: ",
"raw": "🚀 Added to the Avatars Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"href": null,
"resource": {
"type": "collection",
"id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"discussionNum": null
},
"url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔍 Keywords: #GPAvatar #MTA #Synthesis #LipSyncing #Expressions #HighResolutionVideos #ICLR2024 #DeepLearning #Animation #Innovation",
"raw": "🔍 Keywords: #GPAvatar #MTA #Synthesis #LipSyncing #Expressions #HighResolutionVideos #ICLR2024 #DeepLearning #Animation #Innovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀🎭🌟 New Research Alert - ICLR 2024! 🌟🎭 🚀
📄 Title: GPAvatar: Generalizable and Precise Head Avatar from Image(s) 🌟🚀
📝 Description: GPAvatar's objective is to faithfully replicate head avatars while providing precise control over expressions and postures.
👥 Authors: Xuangeng Chu et al.
📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹
🔗 Paper: https://huggingface.co/papers/2401.10215
🔗 Github Page: https://xg-chu.github.io/project_gpavatar
🔗 Repository: https://github.com/xg-chu/GPAvatar
🔗 Video: https://www.youtube.com/watch?v=7A3DMaB6Zk0
📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin
🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36
🔍 Keywords: #GPAvatar #MTA #Synthesis #LipSyncing #Expressions #HighResolutionVideos #ICLR2024 #DeepLearning #Animation #Innovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/SxU-XFXGes0FDFZy_a6MX.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/KGEz-gtOBrFWGe5KevBpl.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/Qp8aFaqpgBZ-PHTpD4Zf7.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/CL6zFz20dhvfVysnGtEX_.png"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/7kdDh_lnJTltaCWv8sPz-.mp4"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg",
"fullname": "Dmitry Ryumin",
"name": "DmitryRyumin",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 377
}
] | [
{
"reaction": "❤️",
"users": [
"DmitryRyumin",
"osanseviero",
"samusenps",
"philipp-zettl",
"rreed-pha"
],
"count": 5
}
] | 2024-03-09T12:08:16.000Z | 2024-03-11T23:23:57.360Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
}
] | /posts/DmitryRyumin/289967366439346 | 80 | 1 |
616140237851014 | [
{
"type": "text",
"value": "Hi!",
"raw": "Hi!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We release a series of LLMs named GraphWiz ",
"raw": "We release a series of LLMs named GraphWiz ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/GraphWiz/",
"href": "https://huggingface.co/GraphWiz/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ", which is designed for solving different graph problems and outputs their CoT. GraphWiz has surpassed GPT-4 by a large margin in 9 tasks.",
"raw": ", which is designed for solving different graph problems and outputs their CoT. GraphWiz has surpassed GPT-4 by a large margin in 9 tasks.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Let us have a try!",
"raw": "Let us have a try!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hi!
We release a series of LLMs named GraphWiz https://huggingface.co/GraphWiz/, which is designed for solving different graph problems and outputs their CoT. GraphWiz has surpassed GPT-4 by a large margin in 9 tasks.
Let us have a try! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64f5a9799bd32ae6c82e0bb4/j7qf4vMJhSLLKqJp5zXig.png",
"fullname": "Chen",
"name": "Nuo97",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"Nuo97",
"osanseviero",
"Kukedlc",
"samusenps",
"kramp",
"Androidkit",
"yoeldcd"
],
"count": 7
},
{
"reaction": "🚀",
"users": [
"Nuo97"
],
"count": 1
}
] | 2024-03-09T12:00:25.000Z | 2024-03-09T12:00:25.915Z | [] | /posts/Nuo97/616140237851014 | 189 | 0 |
137203945214509 | [
{
"type": "text",
"value": "Last day on Spaces of the Week , ",
"raw": "Last day on Spaces of the Week , ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "and we made it to last place on trending. ",
"raw": "and we made it to last place on trending. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "i really thought it couldnt get any better, but i'm crying ! 😭",
"raw": "i really thought it couldnt get any better, but i'm crying ! 😭",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The thing i like the most about ZeroGPU , ",
"raw": "The thing i like the most about ZeroGPU , ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "inline_code",
"value": null,
"raw": "`import spaces`",
"href": null,
"resource": null,
"url": null,
"code": "import spaces",
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " , is that i dont have to always check to see if someone decided to test if i have hard character limits , and it reloads the application flawlessly . ",
"raw": " , is that i dont have to always check to see if someone decided to test if i have hard character limits , and it reloads the application flawlessly . ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "drop a like on my spaces here : ",
"raw": "drop a like on my spaces here : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Spaces of the Week : ",
"raw": "Spaces of the Week : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/spaces/tonic/starcoder2",
"href": "https://huggingface.co/spaces/tonic/starcoder2",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "9 other ZeroGPU demos : ",
"raw": "9 other ZeroGPU demos : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/tonic",
"href": "https://huggingface.co/tonic",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Last day on Spaces of the Week ,
and we made it to last place on trending.
i really thought it couldnt get any better, but i'm crying ! 😭
The thing i like the most about ZeroGPU , `import spaces` , is that i dont have to always check to see if someone decided to test if i have hard character limits , and it reloads the application flawlessly .
drop a like on my spaces here :
Spaces of the Week : https://huggingface.co/spaces/tonic/starcoder2
9 other ZeroGPU demos : https://huggingface.co/tonic | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg",
"fullname": "Joseph [open/acc] Pollack",
"name": "Tonic",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 313,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/A06xmjaR9OUlcyUHCeQee.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"osanseviero",
"samusenps",
"Solshine"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"mvaloatto"
],
"count": 1
},
{
"reaction": "🤗",
"users": [
"osanseviero"
],
"count": 1
}
] | 2024-03-09T09:37:33.000Z | 2024-03-09T09:37:33.162Z | [] | /posts/Tonic/137203945214509 | 63 | 0 |
269615058972727 | [
{
"type": "text",
"value": "I remember very well that about two years ago, 0-shot named entity recognition (i.e. where you can choose any labels on the fly) was completely infeasible. Fast forward a year, and ",
"raw": "I remember very well that about two years ago, 0-shot named entity recognition (i.e. where you can choose any labels on the fly) was completely infeasible. Fast forward a year, and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/Universal-NER/UniNER-7B-all",
"href": null,
"resource": {
"type": "model",
"id": "Universal-NER/UniNER-7B-all",
"discussionNum": null
},
"url": "https://huggingface.co/Universal-NER/UniNER-7B-all",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " surprised me by showing that 0-shot NER is possible! However, I had a bunch of concerns that prevented me from ever adopting it myself. For example, the model was 7B parameters, only worked with 1 custom label at a time, and it had a cc-by-nc-4.0 license.",
"raw": " surprised me by showing that 0-shot NER is possible! However, I had a bunch of concerns that prevented me from ever adopting it myself. For example, the model was 7B parameters, only worked with 1 custom label at a time, and it had a cc-by-nc-4.0 license.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Since then, a little known research paper introduced GLiNER, which was a modified & finetuned variant of the ",
"raw": "Since then, a little known research paper introduced GLiNER, which was a modified & finetuned variant of the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/microsoft/deberta-v3-base",
"href": null,
"resource": {
"type": "model",
"id": "microsoft/deberta-v3-base",
"discussionNum": null
},
"url": "https://huggingface.co/microsoft/deberta-v3-base",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " line of models. Notably, GLiNER outperforms UniNER-7B, despite being almost 2 orders of magnitude smaller! It also allows for multiple labels at once, supports nested NER, and the models are Apache 2.0.",
"raw": " line of models. Notably, GLiNER outperforms UniNER-7B, despite being almost 2 orders of magnitude smaller! It also allows for multiple labels at once, supports nested NER, and the models are Apache 2.0.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Very recently, the models were uploaded to Hugging Face, and I was inspired to create a demo for the English model. The demo runs on CPU, and can still very efficiently compute labels with great performance. I'm very impressed at the models.",
"raw": "Very recently, the models were uploaded to Hugging Face, and I was inspired to create a demo for the English model. The demo runs on CPU, and can still very efficiently compute labels with great performance. I'm very impressed at the models.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "There are two models right now:",
"raw": "There are two models right now:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* base (english): ",
"raw": "* base (english): ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/urchade/gliner_base",
"href": null,
"resource": {
"type": "model",
"id": "urchade/gliner_base",
"discussionNum": null
},
"url": "https://huggingface.co/urchade/gliner_base",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* multi (multilingual): ",
"raw": "* multi (multilingual): ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/urchade/gliner_multi",
"href": null,
"resource": {
"type": "model",
"id": "urchade/gliner_multi",
"discussionNum": null
},
"url": "https://huggingface.co/urchade/gliner_multi",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "And my demo to experiment with the base model can be found here: ",
"raw": "And my demo to experiment with the base model can be found here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/spaces/tomaarsen/gliner_base",
"href": "https://huggingface.co/spaces/tomaarsen/gliner_base",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I remember very well that about two years ago, 0-shot named entity recognition (i.e. where you can choose any labels on the fly) was completely infeasible. Fast forward a year, and https://huggingface.co/Universal-NER/UniNER-7B-all surprised me by showing that 0-shot NER is possible! However, I had a bunch of concerns that prevented me from ever adopting it myself. For example, the model was 7B parameters, only worked with 1 custom label at a time, and it had a cc-by-nc-4.0 license.
Since then, a little known research paper introduced GLiNER, which was a modified & finetuned variant of the https://huggingface.co/microsoft/deberta-v3-base line of models. Notably, GLiNER outperforms UniNER-7B, despite being almost 2 orders of magnitude smaller! It also allows for multiple labels at once, supports nested NER, and the models are Apache 2.0.
Very recently, the models were uploaded to Hugging Face, and I was inspired to create a demo for the English model. The demo runs on CPU, and can still very efficiently compute labels with great performance. I'm very impressed at the models.
There are two models right now:
* base (english): https://huggingface.co/urchade/gliner_base
* multi (multilingual): https://huggingface.co/urchade/gliner_multi
And my demo to experiment with the base model can be found here: https://huggingface.co/spaces/tomaarsen/gliner_base | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png",
"fullname": "Tom Aarsen",
"name": "tomaarsen",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1060,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/epIXE9-2IOCQ1hlnASY3F.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"osanseviero",
"MexIvanov",
"urchade",
"rajistics",
"samusenps",
"ajibawa-2023",
"danielus",
"nickprock",
"polodealvarado",
"deepsh2207",
"luqman2511",
"chokamp",
"Poulain",
"Godspeed-AI",
"eek",
"louisguitton"
],
"count": 16
},
{
"reaction": "🤝",
"users": [
"rreed-pha",
"polodealvarado"
],
"count": 2
},
{
"reaction": "🤯",
"users": [
"Tonic",
"erickdp"
],
"count": 2
}
] | 2024-03-08T20:01:23.000Z | 2024-03-10T01:29:02.684Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1652986473945-60f2e74cadf471cbdf8bb663.jpeg",
"fullname": "Rajiv Shah",
"name": "rajistics",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 60,
"isFollowing": false
},
{
"avatarUrl": "/avatars/965844ce3c3713463a58932f434ac9ac.svg",
"fullname": "Deepanshu Sharma",
"name": "deepsh2207",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62111fdbe1d974ee5bcbfa27/YUzX6lBvW8pbxDorx1kgV.png",
"fullname": "Urchade Zaratiana",
"name": "urchade",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 149,
"isFollowing": false
}
] | /posts/tomaarsen/269615058972727 | 475 | 3 |
Subsets and Splits