{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"gpt4all","owner":"nomic-ai","isFork":false,"description":"gpt4all: run open-source LLMs anywhere","allTopics":["llm-inference"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":17,"issueCount":408,"starsCount":65678,"forksCount":7232,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-07T23:13:21.014Z"}},{"type":"Public","name":"llama.cpp","owner":"nomic-ai","isFork":true,"description":"Nomic Vulkan Fork of LLaMa.cpp","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":39,"forksCount":8522,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T18:41:37.403Z"}},{"type":"Public","name":"kompute","owner":"nomic-ai","isFork":true,"description":"General purpose GPU compute framework built on Vulkan to support 1000s of cross vendor graphics cards (AMD, Qualcomm, NVIDIA & friends). Blazing fast, mobile-enabled, asynchronous and optimized for advanced GPU data processing usecases. Backed by the Linux Foundation.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":1,"issueCount":0,"starsCount":30,"forksCount":132,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T15:24:35.359Z"}},{"type":"Public","name":"linuxdeployqt","owner":"nomic-ai","isFork":true,"description":"Makes Linux applications self-contained by copying in the libraries and plugins that the application uses, and optionally generates an AppImage. \nCan be used for Qt and other applications","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":401,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-26T20:06:50.203Z"}},{"type":"Public archive","name":"pygpt4all","owner":"nomic-ai","isFork":false,"description":"Official supported Python bindings for llama.cpp + gpt4all","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":1,"issueCount":19,"starsCount":1025,"forksCount":159,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-12T13:46:55.071Z"}},{"type":"Public archive","name":"gpt4all-chat","owner":"nomic-ai","isFork":false,"description":"gpt4all-j chat","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":1,"issueCount":31,"starsCount":1265,"forksCount":150,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-10T16:03:02.041Z"}}],"repositoryCount":6,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}