{"id":27984402,"url":"https://github.com/SqueezeAILab/KVQuant","last_synced_at":"2025-05-08T05:01:59.084Z","repository":{"id":220183239,"uuid":"750973875","full_name":"SqueezeAILab/KVQuant","owner":"SqueezeAILab","description":"[NeurIPS 2024] KVQuant: Towards 10 Million Context Length LLM Inference with KV Cache Quantization","archived":false,"fork":false,"pushed_at":"2024-08-13T11:19:28.000Z","size":20767,"stargazers_count":342,"open_issues_count":15,"forks_count":30,"subscribers_count":10,"default_branch":"main","last_synced_at":"2025-04-30T02:04:45.176Z","etag":null,"topics":["compression","efficient-inference","efficient-model","large-language-models","llama","llm","localllama","localllm","mistral","model-compression","natural-language-processing","quantization","small-models","text-generation","transformer"],"latest_commit_sha":null,"homepage":"https://arxiv.org/abs/2401.18079","language":"Python","has_issues":true,"has_wiki":null,"has_pages":null,"mirror_url":null,"source_name":null,"license":null,"status":null,"scm":"git","pull_requests_enabled":true,"icon_url":"https://github.com/SqueezeAILab.png","metadata":{"files":{"readme":"README.md","changelog":null,"contributing":null,"funding":null,"license":null,"code_of_conduct":null,"threat_model":null,"audit":null,"citation":null,"codeowners":null,"security":null,"support":null,"governance":null,"roadmap":null,"authors":null,"dei":null,"publiccode":null,"codemeta":null}},"created_at":"2024-01-31T17:30:10.000Z","updated_at":"2025-04-25T07:09:50.000Z","dependencies_parsed_at":"2024-08-18T18:15:53.373Z","dependency_job_id":null,"html_url":"https://github.com/SqueezeAILab/KVQuant","commit_stats":{"total_commits":11,"total_committers":3,"mean_commits":"3.6666666666666665","dds":0.5454545454545454,"last_synced_commit":"57a238357f0ffe50084670fcd5781c9848f80ea2"},"previous_names":["squeezeailab/kvquant"],"tags_count":0,"template":false,"template_full_name":null,"repository_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/SqueezeAILab%2FKVQuant","tags_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/SqueezeAILab%2FKVQuant/tags","releases_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/SqueezeAILab%2FKVQuant/releases","manifests_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/SqueezeAILab%2FKVQuant/manifests","owner_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/owners/SqueezeAILab","download_url":"https://codeload.github.com/SqueezeAILab/KVQuant/tar.gz/refs/heads/main","host":{"name":"GitHub","url":"https://github.com","kind":"github","repositories_count":253002856,"owners_count":21838640,"icon_url":"https://github.com/github.png","version":null,"created_at":"2022-05-30T11:31:42.601Z","updated_at":"2022-07-04T15:15:14.044Z","host_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub","repositories_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories","repository_names_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repository_names","owners_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/owners"}},"keywords":["compression","efficient-inference","efficient-model","large-language-models","llama","llm","localllama","localllm","mistral","model-compression","natural-language-processing","quantization","small-models","text-generation","transformer"],"created_at":"2025-05-08T05:01:50.379Z","updated_at":"2025-05-08T05:01:54.054Z","avatar_url":"https://github.com/SqueezeAILab.png","language":"Python","readme":null,"funding_links":[],"categories":["A01_文本生成_文本对话"],"sub_categories":["大语言对话模型及数据"],"project_url":"https://awesome.ecosyste.ms/api/v1/projects/github.com%2FSqueezeAILab%2FKVQuant","html_url":"https://awesome.ecosyste.ms/projects/github.com%2FSqueezeAILab%2FKVQuant","lists_url":"https://awesome.ecosyste.ms/api/v1/projects/github.com%2FSqueezeAILab%2FKVQuant/lists"}