{"id":13560158,"url":"https://github.com/OpenGVLab/Multi-Modality-Arena","last_synced_at":"2025-04-03T15:31:56.981Z","repository":{"id":163995899,"uuid":"638869686","full_name":"OpenGVLab/Multi-Modality-Arena","owner":"OpenGVLab","description":"Chatbot Arena meets multi-modality! Multi-Modality Arena allows you to benchmark vision-language models side-by-side while providing images as inputs. Supports MiniGPT-4, LLaMA-Adapter V2, LLaVA, BLIP-2, and many more!","archived":false,"fork":false,"pushed_at":"2024-04-21T11:14:46.000Z","size":22550,"stargazers_count":463,"open_issues_count":22,"forks_count":35,"subscribers_count":6,"default_branch":"main","last_synced_at":"2024-11-04T11:38:51.230Z","etag":null,"topics":["chat","chatbot","chatgpt","gradio","large-language-models","llms","multi-modality","vision-language-model","vqa"],"latest_commit_sha":null,"homepage":"","language":"Python","has_issues":true,"has_wiki":null,"has_pages":null,"mirror_url":null,"source_name":null,"license":null,"status":null,"scm":"git","pull_requests_enabled":true,"icon_url":"https://github.com/OpenGVLab.png","metadata":{"files":{"readme":"README.md","changelog":null,"contributing":null,"funding":null,"license":null,"code_of_conduct":null,"threat_model":null,"audit":null,"citation":null,"codeowners":null,"security":null,"support":null,"governance":null,"roadmap":null,"authors":null,"dei":null,"publiccode":null,"codemeta":null}},"created_at":"2023-05-10T09:26:08.000Z","updated_at":"2024-10-31T14:24:23.000Z","dependencies_parsed_at":"2024-08-01T13:28:14.305Z","dependency_job_id":null,"html_url":"https://github.com/OpenGVLab/Multi-Modality-Arena","commit_stats":null,"previous_names":[],"tags_count":0,"template":false,"template_full_name":null,"repository_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/OpenGVLab%2FMulti-Modality-Arena","tags_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/OpenGVLab%2FMulti-Modality-Arena/tags","releases_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/OpenGVLab%2FMulti-Modality-Arena/releases","manifests_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/OpenGVLab%2FMulti-Modality-Arena/manifests","owner_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/owners/OpenGVLab","download_url":"https://codeload.github.com/OpenGVLab/Multi-Modality-Arena/tar.gz/refs/heads/main","host":{"name":"GitHub","url":"https://github.com","kind":"github","repositories_count":247028052,"owners_count":20871641,"icon_url":"https://github.com/github.png","version":null,"created_at":"2022-05-30T11:31:42.601Z","updated_at":"2022-07-04T15:15:14.044Z","host_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub","repositories_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories","repository_names_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repository_names","owners_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/owners"}},"keywords":["chat","chatbot","chatgpt","gradio","large-language-models","llms","multi-modality","vision-language-model","vqa"],"created_at":"2024-08-01T13:00:38.500Z","updated_at":"2025-04-03T15:31:51.962Z","avatar_url":"https://github.com/OpenGVLab.png","language":"Python","readme":null,"funding_links":[],"categories":["🎭 Multi-modal Testing","Data Source ![](https://img.shields.io/badge/Data_Source-yellow)","Datasets-or-Benchmark","Python"],"sub_categories":["Visual Question Answering 
Datasets","多模态-跨模态"],"project_url":"https://awesome.ecosyste.ms/api/v1/projects/github.com%2FOpenGVLab%2FMulti-Modality-Arena","html_url":"https://awesome.ecosyste.ms/projects/github.com%2FOpenGVLab%2FMulti-Modality-Arena","lists_url":"https://awesome.ecosyste.ms/api/v1/projects/github.com%2FOpenGVLab%2FMulti-Modality-Arena/lists"}