{"id":13442500,"url":"https://github.com/OpenGVLab/InternVideo","last_synced_at":"2025-03-20T14:31:20.973Z","repository":{"id":64358066,"uuid":"569713120","full_name":"OpenGVLab/InternVideo","owner":"OpenGVLab","description":"[ECCV2024] Video Foundation Models \u0026 Data for Multimodal Understanding","archived":false,"fork":false,"pushed_at":"2024-12-07T16:20:41.000Z","size":55790,"stargazers_count":1452,"open_issues_count":92,"forks_count":90,"subscribers_count":27,"default_branch":"main","last_synced_at":"2024-12-09T08:52:15.241Z","etag":null,"topics":["action-recognition","benchmark","contrastive-learning","foundation-models","instruction-tuning","masked-autoencoder","multimodal","open-set-recognition","self-supervised","spatio-temporal-action-localization","temporal-action-localization","video-clip","video-data","video-dataset","video-question-answering","video-retrieval","video-understanding","vision-transformer","zero-shot-classification","zero-shot-retrieval"],"latest_commit_sha":null,"homepage":"","language":"Python","has_issues":true,"has_wiki":null,"has_pages":null,"mirror_url":null,"source_name":null,"license":"apache-2.0","status":null,"scm":"git","pull_requests_enabled":true,"icon_url":"https://github.com/OpenGVLab.png","metadata":{"files":{"readme":"README.md","changelog":null,"contributing":null,"funding":null,"license":"LICENSE","code_of_conduct":null,"threat_model":null,"audit":null,"citation":null,"codeowners":null,"security":null,"support":null,"governance":null,"roadmap":null,"authors":null,"dei":null,"publiccode":null,"codemeta":null}},"created_at":"2022-11-23T12:57:00.000Z","updated_at":"2024-12-09T02:41:46.000Z","dependencies_parsed_at":"2025-01-14T06:35:28.126Z","dependency_job_id":null,"html_url":"https://github.com/OpenGVLab/InternVideo","commit_stats":null,"previous_names":[],"tags_count":0,"template":false,"template_full_name":null,"repository_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/OpenGVLab%2FInternVideo","tags_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/OpenGVLab%2FInternVideo/tags","releases_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/OpenGVLab%2FInternVideo/releases","manifests_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/OpenGVLab%2FInternVideo/manifests","owner_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/owners/OpenGVLab","download_url":"https://codeload.github.com/OpenGVLab/InternVideo/tar.gz/refs/heads/main","host":{"name":"GitHub","url":"https://github.com","kind":"github","repositories_count":244630130,"owners_count":20484320,"icon_url":"https://github.com/github.png","version":null,"created_at":"2022-05-30T11:31:42.601Z","updated_at":"2022-07-04T15:15:14.044Z","host_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub","repositories_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories","repository_names_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repository_names","owners_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/owners"}},"keywords":["action-recognition","benchmark","contrastive-learning","foundation-models","instruction-tuning","masked-autoencoder","multimodal","open-set-recognition","self-supervised","spatio-temporal-action-localization","temporal-action-localization","video-clip","video-data","video-dataset","video-question-answering","video-retrieval","video-understanding","vision-transformer","zero-shot-classification","zero-shot-retrieval"],"created_at":"2024-07-31T03:01:46.501Z","updated_at":"2025-03-20T14:31:15.963Z","a
vatar_url":"https://github.com/OpenGVLab.png","language":"Python","readme":null,"funding_links":[],"categories":["Python","HarmonyOS"],"sub_categories":["Windows Manager"],"project_url":"https://awesome.ecosyste.ms/api/v1/projects/github.com%2FOpenGVLab%2FInternVideo","html_url":"https://awesome.ecosyste.ms/projects/github.com%2FOpenGVLab%2FInternVideo","lists_url":"https://awesome.ecosyste.ms/api/v1/projects/github.com%2FOpenGVLab%2FInternVideo/lists"}