{"id":13512210,"url":"https://github.com/basicmi/AI-Chip","last_synced_at":"2025-03-30T22:32:11.826Z","repository":{"id":41190220,"uuid":"99764044","full_name":"basicmi/AI-Chip","owner":"basicmi","description":"A list of ICs and IPs for AI, Machine Learning and Deep Learning.","archived":false,"fork":false,"pushed_at":"2024-06-05T21:41:23.000Z","size":116296,"stargazers_count":1657,"open_issues_count":24,"forks_count":275,"subscribers_count":266,"default_branch":"master","last_synced_at":"2025-03-24T20:13:07.281Z","etag":null,"topics":["ai-chips","chip","deep-learning","machine-learning","processor"],"latest_commit_sha":null,"homepage":"https://basicmi.github.io/AI-Chip/","language":"PHP","has_issues":true,"has_wiki":null,"has_pages":null,"mirror_url":null,"source_name":null,"license":null,"status":null,"scm":"git","pull_requests_enabled":true,"icon_url":"https://github.com/basicmi.png","metadata":{"files":{"readme":"README.md","changelog":null,"contributing":null,"funding":null,"license":null,"code_of_conduct":null,"threat_model":null,"audit":null,"citation":null,"codeowners":null,"security":null,"support":null,"governance":null,"roadmap":null,"authors":null,"dei":null,"publiccode":null,"codemeta":null}},"created_at":"2017-08-09T04:10:54.000Z","updated_at":"2025-03-16T05:16:26.000Z","dependencies_parsed_at":"2023-01-31T02:15:26.114Z","dependency_job_id":"96dc8ef1-7669-4b26-ad51-5b1662d97adb","html_url":"https://github.com/basicmi/AI-Chip","commit_stats":{"total_commits":763,"total_committers":3,"mean_commits":"254.33333333333334","dds":0.03669724770642202,"last_synced_commit":"5ddedc71cdfd73f256ddfbedb8629e1eed929e49"},"previous_names":[],"tags_count":0,"template":false,"template_full_name":null,"repository_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/basicmi%2FAI-Chip","tags_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/basicmi%2FAI-Chip/tags","releases_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/basicmi%
2FAI-Chip/releases","manifests_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories/basicmi%2FAI-Chip/manifests","owner_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/owners/basicmi","download_url":"https://codeload.github.com/basicmi/AI-Chip/tar.gz/refs/heads/master","host":{"name":"GitHub","url":"https://github.com","kind":"github","repositories_count":246390895,"owners_count":20769475,"icon_url":"https://github.com/github.png","version":null,"created_at":"2022-05-30T11:31:42.601Z","updated_at":"2022-07-04T15:15:14.044Z","host_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub","repositories_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repositories","repository_names_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/repository_names","owners_url":"https://repos.ecosyste.ms/api/v1/hosts/GitHub/owners"}},"keywords":["ai-chips","chip","deep-learning","machine-learning","processor"],"created_at":"2024-08-01T03:01:35.996Z","updated_at":"2025-03-30T22:32:06.817Z","avatar_url":"https://github.com/basicmi.png","language":"PHP","readme":"\u003cdiv align=\"center\"\u003e\u003ch1\u003eAI Chip (ICs and IPs)\u003c/h1\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/AI-chips.png\"\u003e\u003c/div\u003e\n\u003cbr\u003e\n\u003cdiv align=\"center\"\u003eEditor \u003ca href=\"https://www.linkedin.com/in/shan-tang-27342510/\"\u003e\u003cstrong\u003eS.T.\u003c/strong\u003e\u003c/a\u003e(Linkedin)\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003cstrong\u003eWelcome to My Wechat Blog \u003ca href=\"[https://mp.weixin.qq.com/s/axfIBbQBDhTJ2Zt7U5WQBw](https://mp.weixin.qq.com/mp/appmsgalbum?action=getalbum\u0026__biz=MzI3MDQ2MjA3OA==\u0026scene=1\u0026album_id=1374108991751782402\u0026count=3#wechat_redirect)\"\u003eStarryHeavensAbove\u003c/a\u003e for more AI chip related articles\u003c/strong\u003e\u003c/div\u003e\n\u003cdiv 
align=\"center\"\u003e\u003cstrong\u003e欢迎访问我的微信公众号 \u003ca href=\"[https://mp.weixin.qq.com/s/axfIBbQBDhTJ2Zt7U5WQBw](https://mp.weixin.qq.com/mp/appmsgalbum?action=getalbum\u0026__biz=MzI3MDQ2MjA3OA==\u0026scene=1\u0026album_id=1374108991751782402\u0026count=3#wechat_redirect)\"\u003eStarryHeavensAbove\u003c/a\u003e\u003c/strong\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/qrcode_for_weichat_258.jpg\" height=\"100\"\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003ch1\u003e \u003c/h1\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/AI_Chip_Landscape_v0p7.png\"\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003ch1\u003e \u003c/h1\u003e\u003c/div\u003e\n \n\u003cdiv align=\"center\"\u003e\u003ch2\u003eLatest updates\u003c/h2\u003e\u003c/div\u003e\n\u003cHR\u003e\n\n\u003cfont color=\"Darkred\"\u003e\n\u003cul\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#SambaNova\"\u003eSambaNova\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Groq\"\u003eGroq\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#d-matrix\"\u003ed-Matrix\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Neureality\"\u003eNeureality\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Qualcomm\"\u003eQualcomm\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Nvidia\"\u003eNvidia\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Cerebras\"\u003eCerebras\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd link to \u003ca href=\"#AIChipBenchmarks\"\u003eLatest MLPerf Results from MLCommons\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#IBM\"\u003eIBM AIU\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca 
href=\"#Tesla\"\u003eTesla Dojo\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd link to \u003ca href=\"#AIChipBenchmarks\"\u003eLatest MLPerf Results from MLCommons\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Cerebras\"\u003eCerebras\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#d-matrix\"\u003ed-Matrix\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Tachyum\"\u003eTachyum Prodigy Universal Processor\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Habana\"\u003eIntel Habana Gaudi®2\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#Modular\"\u003eModular AI in AI compiler section\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#Teramem\"\u003eTeraMem\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#Aspinity\"\u003eAspinity\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Synopsys\"\u003eSynopsys DesignWare ARC NPX6 NPU IP\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Nvidia\"\u003eNvidia Hopper\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Graphcore\"\u003eGraphcore\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#Ceremorphic\"\u003eCeremorphic\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Lightelligence\"\u003eLightelligence\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd link to \u003ca href=\"#AIChipBenchmarks\"\u003eLatest MLPerf Results from MLCommons\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Cerebras\"\u003eCerebras\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Habana\"\u003eHabana\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Google\"\u003eGoogle Tensor Chip\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Intel\"\u003eIntel Loihi 
2\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Tesla\"\u003eTesla Dojo\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Untether\"\u003eUntether AI\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#Innatera\"\u003eInnatera Nanosystems\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#EdgeQ\"\u003eEdgeQ\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#Quadric\"\u003eQuadric\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#AnalogInference\"\u003eAnalog Inference\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Tenstorrent\"\u003eTenstorrent\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Google\"\u003eGoogle\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#SiMa\"\u003eSiMa.ai\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd startup \u003ca href=\"#Neureality\"\u003eNeureality\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Cerebras\"\u003eCerebras\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Groq\"\u003eGroq\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#Nvidia\"\u003eNvidia\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eAdd news of \u003ca href=\"#SambaNova\"\u003eSambaNova\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/font\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch1\u003e \u003c/h1\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003eShortcut\u003c/h2\u003e\u003c/div\u003e\n\u003cHR\u003e\n\u003ctable style=\"width:100%\"\u003e\n  \u003ctr\u003e\n    \u003cth\u003e\u003ca href=\"#IC_Vendors\"\u003eIC Vendors\u003c/a\u003e\u003c/th\u003e\u003ctd\u003e\u003ca href=\"#Intel\"\u003eIntel\u003c/a\u003e, \u003ca href=\"#Qualcomm\"\u003eQualcomm\u003c/a\u003e, \u003ca href=\"#Nvidia\"\u003eNvidia\u003c/a\u003e, \u003ca 
href=\"#Samsung\"\u003eSamsung\u003c/a\u003e, \u003ca href=\"#AMD\"\u003eAMD\u003c/a\u003e,\u003ca href=\"#IBM\"\u003eIBM\u003c/a\u003e, \u003ca href=\"#Marvell\"\u003eMarvell\u003c/a\u003e\u003c/td\u003e\n  \u003c/tr\u003e\n  \u003ctr\u003e\n    \u003cth\u003e\u003ca href=\"#Tech_Giants\"\u003eTech Giants \u0026 HPC Vendors\u003c/a\u003e\u003c/th\u003e\u003ctd\u003e\u003ca href=\"#Google\"\u003eGoogle\u003c/a\u003e, \u003ca href=\"#Amazon_AWS\"\u003eAmazon_AWS\u003c/a\u003e, \u003ca href=\"#Microsoft\"\u003eMicrosoft\u003c/a\u003e, \u003ca href=\"#Apple\"\u003eApple\u003c/a\u003e, \u003ca href=\"#Alibaba\"\u003eAlibaba Group\u003c/a\u003e, \u003ca href=\"#Tencent_Cloud\"\u003eTencent Cloud\u003c/a\u003e, \u003ca href=\"#Baidu\"\u003eBaidu\u003c/a\u003e, \u003ca href=\"#Fujitsu\"\u003eFujitsu\u003c/a\u003e, \u003ca href=\"#Nokia\"\u003eNokia\u003c/a\u003e, \u003ca href=\"#Facebook\"\u003eFacebook\u003c/a\u003e, \u003ca href=\"#Tesla\"\u003eTesla\u003c/a\u003e\u003c/td\u003e\n  \u003c/tr\u003e\n  \u003ctr\u003e\n    \u003cth\u003e\u003ca href=\"#IP_Vendors\"\u003eIP Vendors\u003c/a\u003e\u003c/th\u003e\u003ctd\u003e\u003ca href=\"#ARM\"\u003eARM\u003c/a\u003e, \u003ca href=\"#Synopsys\"\u003eSynopsys\u003c/a\u003e, \u003ca href=\"#Imagination\"\u003eImagination\u003c/a\u003e, \u003ca href=\"#CEVA\"\u003eCEVA\u003c/a\u003e, \u003ca href=\"#Cadence\"\u003eCadence\u003c/a\u003e, \u003ca href=\"#VeriSilicon\"\u003eVeriSilicon\u003c/a\u003e\u003c/td\u003e\n  \u003c/tr\u003e\n  \u003ctr\u003e  \n    \u003cth\u003e\u003ca href=\"#Startups_Worldwide\"\u003eStartups\u003c/a\u003e\u003c/th\u003e\n    \u003ctd\u003e\u003ca href=\"#Cerebras\"\u003eCerebras\u003c/a\u003e, \u003ca href=\"#Graphcore\"\u003eGraphcore\u003c/a\u003e, \u003ca href=\"#Tenstorrent\"\u003eTenstorrent\u003c/a\u003e, \u003ca href=\"#Blaize\"\u003eBlaize\u003c/a\u003e, \u003ca href=\"#Koniku\"\u003eKoniku\u003c/a\u003e, \u003ca href=\"#Adapteva\"\u003eAdapteva\u003c/a\u003e, \u003ca 
href=\"#Mythic\"\u003eMythic\u003c/a\u003e, \u003ca href=\"#Brainchip\"\u003eBrainChip\u003c/a\u003e, \u003ca href=\"#Leepmind\"\u003eLeepmind\u003c/a\u003e, \u003ca href=\"#Groq\"\u003eGroq\u003c/a\u003e, \u003ca href=\"#Kneron\"\u003eKneron\u003c/a\u003e, \u003ca href=\"#Esperanto\"\u003eEsperanto Technologies\u003c/a\u003e, \u003ca href=\"#GTI\"\u003eGyrfalcon Technology\u003c/a\u003e, \u003ca href=\"#SambaNova\"\u003eSambaNova Systems\u003c/a\u003e, \u003ca href=\"#GreenWaves\"\u003eGreenWaves Technology\u003c/a\u003e, \u003ca href=\"#Lightelligence\"\u003eLightelligence\u003c/a\u003e, \u003ca href=\"#Lightmatter\"\u003eLightmatter\u003c/a\u003e, \u003ca href=\"#Hailo\"\u003eHailo\u003c/a\u003e,\u003ca href=\"#Tachyum\"\u003eTachyum\u003c/a\u003e,\u003ca href=\"#Alphaics\"\u003eAlphaICs\u003c/a\u003e,\u003ca href=\"#Syntiant\"\u003eSyntiant\u003c/a\u003e, \u003ca href=\"#aiCTX\"\u003eaiCTX\u003c/a\u003e, \u003ca href=\"#Flexlogix\"\u003eFlex Logix\u003c/a\u003e, \u003ca href=\"#PFN\"\u003ePreferred Network\u003c/a\u003e, \u003ca href=\"#Cornami\"\u003eCornami\u003c/a\u003e, \u003ca href=\"#Anaflash\"\u003eAnaflash\u003c/a\u003e, \u003ca href=\"#Optalysys\"\u003eOptaylsys\u003c/a\u003e, \u003ca href=\"#etacompute\"\u003eEta Compute\u003c/a\u003e, \u003ca href=\"#Achronix\"\u003eAchronix\u003c/a\u003e, \u003ca href=\"#Areanna\"\u003eAreanna AI\u003c/a\u003e, \u003ca href=\"#Neuroblade\"\u003eNeuroblade\u003c/a\u003e, \u003ca href=\"#Luminous\"\u003eLuminous Computing\u003c/a\u003e, \u003ca href=\"#Efinix\"\u003eEfinix\u003c/a\u003e, \u003ca href=\"#AIstorm\"\u003eAISTORM\u003c/a\u003e, \u003ca href=\"#SiMa\"\u003eSiMa.ai\u003c/a\u003e,\u003ca href=\"#Untether\"\u003eUntether AI\u003c/a\u003e, \u003ca href=\"#GrAI\"\u003eGrAI Matter Lab\u003c/a\u003e, \u003ca href=\"#Rain\"\u003eRain Neuromorphics\u003c/a\u003e, \u003ca href=\"#ABR\"\u003eApplied Brain Research\u003c/a\u003e, \u003ca href=\"#Xmos\"\u003eXMOS\u003c/a\u003e, \u003ca 
href=\"#DinoplusAI\"\u003eDinoPlusAI\u003c/a\u003e, \u003ca href=\"#Furiosa\"\u003eFuriosa AI\u003c/a\u003e, \u003ca href=\"#Perceive\"\u003ePerceive\u003c/a\u003e, \u003ca href=\"#SimpleMachines\"\u003eSimpleMachines\u003c/a\u003e, \u003ca href=\"#Neureality\"\u003eNeureality\u003c/a\u003e, \u003ca href=\"#AnalogInference\"\u003eAnalog Inference\u003c/a\u003e, \u003ca href=\"#Quadric\"\u003eQuadric\u003c/a\u003e, \u003ca href=\"#EdgeQ\"\u003eEdgeQ\u003c/a\u003e, \u003ca href=\"#Innatera\"\u003eInnatera Nanosystems\u003c/a\u003e, \u003ca href=\"#Ceremorphic\"\u003eCeremorphic\u003c/a\u003e, \u003ca href=\"#Aspinity\"\u003eAspinity\u003c/a\u003e, \u003ca href=\"#Teramem\"\u003eTeraMem, \u003ca href=\"#d-matrix\"\u003ed-Matrix\u003c/a\u003e\u003c/a\u003e\u003c/td\u003e\n  \u003c/tr\u003e\n\u003c/table\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch1\u003e \u003c/h1\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003e\u003ca name=\"IC_Vendors\"\u003e\u003c/a\u003eI. IC Vendors\u003c/h2\u003e\u003c/div\u003e\n\u003cHR\u003e\n\u003cdiv align=\"center\"\u003e\u003ch1\u003e \u003c/h1\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003ca name=\"Nvidia\"\u003e\u003c/a\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Nvidia_logo.png\" height=\"50\"\u003e \u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003ch3\u003eGPU\u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://nvidianews.nvidia.com/news/nvidia-microsoft-accelerate-cloud-enterprise-ai\"\u003eNVIDIA Teams With Microsoft to Build Massive Cloud AI Computer\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eTens of Thousands of NVIDIA GPUs, NVIDIA Quantum-2 InfiniBand and Full Stack of NVIDIA AI Software Coming to Azure; NVIDIA, Microsoft and 
Global Enterprises to Use Platform for Rapid, Cost-Effective AI Development and Deployment\u003c/p\u003e\n\u003c/blockquote\u003e \n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://developer.nvidia.com/blog/nvidia-hopper-architecture-in-depth\"\u003eNVIDIA Hopper Architecture In-Depth\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eToday during the 2022 NVIDIA GTC Keynote address, NVIDIA CEO Jensen Huang introduced the new NVIDIA H100 Tensor Core GPU based on the new NVIDIA Hopper GPU architecture. This post gives you a look inside the new H100 GPU and describes important new features of NVIDIA Hopper architecture GPUs.\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca href=\"https://www.anandtech.com/show/16610/nvidia-unveils-grace-a-highperformance-arm-server-cpu-for-use-in-ai-systems\"\u003eNVIDIA Unveils Grace: A High-Performance Arm Server CPU For Use In Big AI Systems\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eKicking off another busy Spring GPU Technology Conference for NVIDIA, this morning the graphics and accelerator designer is announcing that they are going to once again design their own Arm-based CPU/SoC. Dubbed Grace – after Grace Hopper, the computer programming pioneer and US Navy rear admiral – the CPU is NVIDIA’s latest stab at more fully vertically integrating their hardware stack by being able to offer a high-performance CPU alongside their regular GPU wares. 
According to NVIDIA, the chip is being designed specifically for large-scale neural network workloads, and is expected to become available in NVIDIA products in 2023.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003ca name=\"Intel\"\u003e\u003c/a\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Intel_logo.png\" height=\"60\"\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003ca name=\"Mobileye\"\u003e\u003c/a\u003e\n\u003cdiv align=\"center\"\u003e\u003ch3\u003eMobileye EyeQ\u003c/h3\u003e\u003c/div\u003e\n\u003e Mobileye is currently developing its fifth generation SoC, the \u003ca href=\"https://www.mobileye.com/our-technology/evolution-eyeq-chip/\"\u003eEyeQ®5\u003c/a\u003e, to act as the vision central computer performing sensor fusion for Fully Autonomous Driving (Level 5) vehicles that will hit the road in 2020. To meet power consumption and performance targets, EyeQ® SoCs are designed in most advanced VLSI process technology nodes – down to 7nm FinFET in the 5th generation. \n\n\u003ca name=\"Loihi 2\"\u003e\u003c/a\u003e\n\u003cdiv align=\"center\"\u003e\u003ch3\u003eLoihi\u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca href=\"https://www.intel.com/content/www/us/en/newsroom/news/intel-unveils-neuromorphic-loihi-2-lava-software.html\"\u003eIntel Advances Neuromorphic with Loihi 2, New Lava Software Framework and New Partners\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eSecond-generation research chip uses pre-production Intel 4 process, grows to 1 million neurons. 
Intel adds open software framework to accelerate developer innovation and path to commercialization.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Habana\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ch3\u003eHabana\u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.intel.com/content/www/us/en/newsroom/news/vision-2022-habana-gaudi2-greco.html\"\u003eIntel’s Habana Labs Launches Second-Generation AI Processors for Training and Inferencing\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eToday at Intel Vision, Intel announced that Habana Labs, its data center team focused on AI deep learning processor technologies, launched its second-generation deep learning processors for training and inference: Habana® Gaudi®2 and Habana® Greco™. These new processors address an industry gap by providing customers with high-performance, high-efficiency deep learning compute choices for both training workloads and inference deployments in the data center while lowering the AI barrier to entry for companies of all sizes.\u003c/p\u003e\n\u003c/blockquote\u003e\n \n\u003cp\u003e\u003ca href=\"https://habana.ai/aws-launches-ec2-dl1-instances/\"\u003eHabana Gaudi debuts in the Amazon EC2 cloud\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe primary motivation to create this new training instance class was presented by Andy Jassy in the 2020 re:Invent: “To provide our end-customers with up to 40% better price-performance than the current generation of GPU-based instances.”\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003ca name=\"Qualcomm\"\u003e\u003c/a\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Qualcomm_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\u003cdiv 
align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003ca href=\"https://www-forbes-com.cdn.ampproject.org/c/s/www.forbes.com/sites/karlfreund/2022/11/16/qualcomm-ups-the-snapgragon-ai-game/amp/\"\u003eQualcomm Ups The Snapgragon AI Game\u003c/a\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe leader in premium mobile SoCs has applied AI across the entire platform.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cstrong\u003e\u003ca href=\"https://www.qualcomm.com/products/technology/processors/cloud-artificial-intelligence/cloud-ai-100\"\u003eQualcomm Cloud AI 100\u003c/a\u003e\u003c/strong\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe Qualcomm Cloud AI 100, designed for AI inference acceleration, addresses unique requirements in the cloud, including power efficiency, scale, process node advancements, and signal processing—facilitating the ability of datacenters to run inference on the edge cloud faster and more efficiently. Qualcomm Cloud AI 100 is designed to be a leading solution for datacenters who increasingly rely on infrastructure at the edge-cloud.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003ca name=\"Samsung\"\u003e\u003c/a\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Samsung_logo.png\" height=\"35\"\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cstrong\u003e\u003ca href=\"https://news.samsung.com/global/samsung-brings-on-device-ai-processing-for-premium-mobile-devices-with-exynos-9-series-9820-processor\"\u003eSamsung Brings On-device AI Processing for Premium Mobile Devices with Exynos 9 Series 9820 Processor\u003c/a\u003e\u003c/strong\u003e\n\u003e Fourth-generation custom core and 2.0Gbps LTE Advanced Pro modem enables enriched mobile experiences including AR and VR applications \n\n\u003cbr\u003e 
\nSamsung resently unveiled “\u003ca href=\"https://news.samsung.com/global/samsung-optimizes-premium-exynos-9-series-9810-for-ai-applications-and-richer-multimedia-content\"\u003eThe new Exynos 9810 brings premium features with a 2.9GHz custom CPU, an industry-first 6CA LTE modem and deep learning processing capabilities\u003c/a\u003e”.   \n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003ca name=\"AMD\"\u003e\u003c/a\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/AMD_logo.png\" height=\"35\"\u003e\u003c/div\u003e\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\nThe soon to be released \u003ca href=\"https://www.amd.com/en/graphics/instinct-server-accelerators\"\u003eAMD Instinct™ MI Series Accelerators\u003c/a\u003e\n\u003e AMD Instinct™ accelerators are engineered from the ground up for this new era of data center computing, supercharging HPC and AI workloads to propel new discoveries. 
The AMD Instinct™ family of accelerators can deliver industry leading performance for the data center at any scale from single server solutions up to the world’s largest supercomputers.1 With new innovations in AMD CDNA™ 2 architecture, AMD Infinity Fabric™ technology and packaging technology, the latest AMD Instinct™ accelerators are designed to power discoveries at exascale, enabling scientists to tackle our most pressing challenges.\n\n\u003cp\u003e\u003ca name=\"IBM\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/IBM_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.ibm.com/blogs/systems/ibm-telum-processor-the-next-gen-microprocessor-for-ibm-z-and-ibm-linuxone/\"\u003eMeet the IBM Artificial Intelligence Unit\u003c/a\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eIt’s our first complete system-on-chip designed to run and train deep learning models faster and more efficiently than a general-purpose CPU.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.ibm.com/blogs/systems/ibm-telum-processor-the-next-gen-microprocessor-for-ibm-z-and-ibm-linuxone/\"\u003eIBM Telum Processor: the next-gen microprocessor for IBM Z and IBM LinuxONE\u003c/a\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eThe 7 nm microprocessor is engineered to meet the demands our clients face for gaining AI-based insights from their data without compromising response time for high volume transactional workloads. 
\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.ibm.com/blogs/research/tag/truenorth/\"\u003eTrueNorth\u003c/a\u003e is IBM's Neuromorphic CMOS ASIC developed in conjunction with the DARPA \u003ca href=\"https://en.wikipedia.org/wiki/SyNAPSE\"\u003eSyNAPSE\u003c/a\u003e program.\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eIt is a manycore processor network on a chip design, with 4096 cores, each one simulating 256 programmable silicon \"neurons\" for a total of just over a million neurons. In turn, each neuron has 256 programmable \"synapses\" that convey the signals between them. Hence, the total number of programmable synapses is just over 268 million (228). In terms of basic building blocks, its transistor count is 5.4 billion. Since memory, computation, and communication are handled in each of the 4096 neurosynaptic cores, TrueNorth circumvents the von-Neumann-architecture bottlenecks and is very energy-efficient, consuming 70 milliwatts, about 1/10,000th the power density of conventional microprocessors. \u003ca href=\"https://en.wikipedia.org/wiki/TrueNorth\"\u003eWikipedia\u003c/a\u003e\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.research.ibm.com/artificial-intelligence/ai-hardware-center/\"\u003eAI Hardware Center\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003e\"The IBM Research AI Hardware Center is a global research hub headquartered in Albany, New York. 
The center is focused on enabling next-generation chips and systems that support the tremendous processing power and unprecedented speed that AI requires to realize its full potential.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Marvell\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Marvell_logo.png\" height=\"60\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.marvell.com/products/data-processing-units.html\"\u003eData Processing Units\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eBuilt on seven generations of the industry’s first, most scalable and widely adopted data infrastructure processors, Marvell’s OCTEON™, OCTEON™ Fusion and ARMADA® platforms are optimized for wireless infrastructure, wireline carrier networks, enterprise and cloud data centers.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003e\u003ca name=\"Tech_Giants\"\u003e\u003c/a\u003eII. 
Tech Giants \u0026 HPC Vendors\u003c/h2\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cHR\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Google\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Google_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.zdnet.com/article/google-tensor-everything-you-need-to-know-about-the-pixel-6-chip/\"\u003eGoogle Tensor: Everything you need to know about the Pixel 6 chip\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eGoogle has taken the wraps off its latest Pixel smartphones and, among the changes, the one with the biggest long-term impact is the switch to in-house silicon for the search giant.\u003c/p\u003e\n\u003c/blockquote\u003e\n \n\u003cp\u003e\u003ca href=\"https://www.hpcwire.com/2021/05/20/google-launches-tpu-v4-ai-chips/\"\u003eGoogle Launches TPU v4 AI Chips\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eGoogle CEO Sundar Pichai spoke for only one minute and 42 seconds about the company’s latest TPU v4 Tensor Processing Units during his keynote at the Google I/O virtual conference this week, but it may have been the most important and awaited news from the event.\u003c/p\u003e\n\u003c/blockquote\u003e\n \n\u003cp\u003e\u003ca href=\"https://cloud.google.com/tpu\"\u003eCloud TPU\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eMachine learning has produced business and research breakthroughs ranging from network security to medical diagnoses. We built the Tensor Processing Unit (TPU) in order to make it possible for anyone to achieve similar breakthroughs. 
Cloud TPU is the custom-designed machine learning ASIC that powers Google products like Translate, Photos, Search, Assistant, and Gmail. Here’s how you can put the TPU and machine learning to work accelerating your company’s success, especially at scale.\u003c/p\u003e\n\u003c/blockquote\u003e\n \n\u003cp\u003e\u003ca href=\"https://cloud.google.com/edge-tpu/\"\u003eEdge TPU\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eAI is pervasive today, from consumer to enterprise applications. With the explosive growth of connected devices, combined with a demand for privacy/confidentiality, low latency, and bandwidth constraints, AI models trained in the cloud increasingly need to be run at the edge. Edge TPU is Google’s purpose-built ASIC designed to run AI at the edge. It delivers high performance in a small physical and power footprint, enabling the deployment of high-accuracy AI at the edge.\u003c/p\u003e\n\u003c/blockquote\u003e\n \n\u003cp\u003eOther references are:\u003cbr\u003e\n\u003ca href=\"https://mp.weixin.qq.com/s/b22p26_delWfSpy9kDJKhA\"\u003eGoogle TPU3 看点\u003c/a\u003e\u003cbr\u003e\u003cbr\u003e\n\u003ca href=\"https://mp.weixin.qq.com/s/Kf_L4u7JRxJ8kF3Pi8M5iw\"\u003eGoogle TPU 揭密\u003c/a\u003e\u003cbr\u003e\u003cbr\u003e\n\u003ca href=\"https://mp.weixin.qq.com/s/lBQyNSNa6-joeLZ_Kq2W8A\"\u003eGoogle的神经网络处理器专利\u003c/a\u003e\u003cbr\u003e\u003cbr\u003e\n\u003ca href=\"https://mp.weixin.qq.com/s/g-BDlvSy-cx4AKItcWF7jQ\"\u003e脉动阵列 - 因Google TPU获得新生\u003c/a\u003e\u003cbr\u003e\u003cbr\u003e\n\u003ca href=\"https://www.linkedin.com/pulse/should-we-all-embrace-systolic-arrays-chien-ping-lu\"\u003eShould We All Embrace Systolic Arrays?\u003c/a\u003e\u003cbr\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Amazon_AWS\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg 
src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Amazon_AWS.png\" height=\"50\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://aws.amazon.com/cn/machine-learning/trainium/\"\u003eAWS Trainium\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eAWS Trainium is the second custom machine learning (ML) chip designed by AWS that provides the best price performance for training deep learning models in the cloud.  Trainium offers the highest performance with the most teraflops (TFLOPS) of compute power for the fastest ML training in Amazon EC2 and enables a broader set of ML applications. The Trainium chip is specifically optimized for deep learning training workloads for applications including image classification, semantic search, translation, voice recognition, natural language processing and recommendation engines.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://aws.amazon.com/cn/machine-learning/inferentia/\"\u003eAWS Inferentia. High performance machine learning inference chip, custom designed by AWS.\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eAWS Inferentia provides high throughput, low latency inference performance at an extremely low cost. Each chip provides hundreds of TOPS (tera operations per second) of inference throughput to allow complex models to make fast predictions. For even more performance, multiple AWS Inferentia chips can be used together to drive thousands of TOPS of throughput. 
AWS Inferentia will be available for use with Amazon SageMaker, Amazon EC2, and Amazon Elastic Inference.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Microsoft\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Microsoft_logo.png\" height=\"60\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Apple\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Apple_logo.png\" height=\"60\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Alibaba\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/alibaba_logo.png\" height=\"60\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://medium.com/syncedreview/alibabas-new-ai-chip-can-process-nearly-80k-images-per-second-63412dec22a3\"\u003eAlibaba’s New AI Chip Can Process Nearly 80K Images Per Second\u003c/a\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eAt the Alibaba Cloud (Aliyun) Apsara Conference 2019, Pingtouge unveiled its first AI dedicated processor for cloud-based large-scale AI inferencing. 
The Hanguang 800 is the first semiconductor product in Alibaba’s 20-year history.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Tencent_Cloud\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Tencent_Cloud_logo.png\" height=\"30\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca href=\"https://www.datacenterdynamics.com/en/news/tencent-reveals-three-data-center-chips-for-ai-video-transcoding-and-networking/\"\u003eTencent reveals three data center chips - for AI, video transcoding, and networking\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe company claims that the Zixiao AI chip is twice as good as comparable competing products, video transcoding chip Canghai was 30 percent better, and SmartNIC Xuanling was apparently four times as good. 
It did not provide external benchmarks or specific product details.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cbr /\u003e\n\u003ca name=\"Baidu\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Baidu_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.reuters.com/technology/baidu-says-2nd-gen-kunlun-ai-chips-enter-mass-production-2021-08-18/\"\u003eBaidu says 2nd-gen Kunlun AI chips enter mass production\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eChinese tech giant Baidu said on Wednesday it had begun mass-producing second-generation Kunlun artificial intelligence (AI) chips, as it races to become a key player in the chip industry which Beijing is trying to strengthen.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Fujitsu\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Fujitsu_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eThis \u003ca href=\"https://www.nextplatform.com/2017/08/09/fujitsu-bets-deep-leaning-hpc-divergence/\"\u003eDLU that Fujitsu is creating\u003c/a\u003e is done from scratch, and it is not based on either the Sparc or ARM instruction set and, in fact, it has its own instruction set and a new data format specifically for deep learning, which were created from scratch. \n  Japanese computing giant Fujitsu. 
Which knows a thing or two about making a very efficient and highly scalable system for HPC workloads, as evidenced by the K supercomputer, does not believe that the HPC and AI architectures will converge. Rather, the company is banking on the fact that these architectures will diverge and will require very specialized functions. \u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Nokia\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Nokia_logo.png\" height=\"30\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eNokia has developed the \u003ca href=\"https://networks.nokia.com/5g/reefshark\"\u003eReefShark chipsets\u003c/a\u003e for its 5G network solutions. AI is implemented in the ReefShark design for radio and embedded in the baseband to use augmented deep learning to trigger smart, rapid actions by the autonomous, cognitive network, enhancing network optimization and increasing business opportunities.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Facebook\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/facebook_logo.png\" height=\"50\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.reuters.com/technology/facebook-developing-machine-learning-chip-information-2021-09-09/\"\u003eFacebook developing machine learning chip - The Information\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eFacebook Inc (FB.O) is developing a 
machine learning chip to handle tasks such as content recommendation to users, The Information reported on Thursday, citing two people familiar with the project.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Tesla\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Tesla_logo.png\" height=\"60\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.forbes.com/sites/jamesmorris/2022/10/06/teslas-biggest-news-at-ai-day-was-the-dojo-supercomputer-not-the-optimus-robot/\"\u003eTesla’s Biggest News At AI Day Was The Dojo Supercomputer, Not The Optimus Robot\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eElon Musk played AI Day to the crowd with the focus on the Optimus humanoid robot. But while this could have a huge impact on our lives and society if it does enter mass production at the price Musk suggested ($20,000), another part of the presentation will have more immediate effects. That was the status report on the Dojo supercomputer. It could really change the world much more quickly than a bipedal bot.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://semianalysis.com/tesla-dojo-ai-super-computer-unique-packaging-and-chip-design-allow-an-order-magnitude-advantage-over-competing-ai-hardware/\"\u003eTesla Dojo – Unique Packaging and Chip Design Allow An Order Magnitude Advantage Over Competing AI Hardware\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eTesla hosted their AI Day and revealed the innerworkings of their software and hardware infrastructure. Part of this reveal was the previously teased Dojo AI training chip. 
Tesla claims their D1 Dojo chip has a GPU level compute, CPU level flexibility, with networking switch IO. \u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003e\u003ca name=\"IP_Vendors\"\u003e\u003c/a\u003eIII. Traditional IP Vendors\u003c/h2\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cHR\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"ARM\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/ARM_logo.png\" height=\"30\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003ca href=\"https://www.arm.com/products/silicon-ip-cpu/ethos/ethos-n78\"\u003eNPU ETHOS-N78\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eSpecifically designed for inference at the edge, the ML processor gives an industry-leading performance of 4.6 TOPs, with a stunning efficiency of 3 TOPs/W for mobile devices and smart IP cameras.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.anandtech.com/show/12791/arm-details-project-trillium-mlp-architecture\"\u003eARM Details \"Project Trillium\" Machine Learning Processor Architecture\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eArm’s second-generation, highly scalable and efficient NPU, the Ethos-N78 enables new immersive applications with a 2.5x increase in single-core performance now scalable from 1 to 10 TOP/s and beyond through many-core technologies. 
It provides flexibility to optimize the ML capability with 90+ configurations.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Synopsys\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Synopsys_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://news.synopsys.com/2022-04-19-Synopsys-Introduces-Industrys-Highest-Performance-Neural-Processor-IP\"\u003eSynopsys Introduces Industry's Highest Performance Neural Processor IP\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eNew DesignWare ARC NPX6 NPU IP Delivers Up to 3,500 TOPS Performance for Automotive, Consumer and Data Center Chip Designs\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Imagination\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Imagination_logo.png\" height=\"60\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.imaginationtech.com/products/ai/\"\u003eAI Processors\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eWhether you want smartness residing in the palm of your hand, consumer products or industrial robots, or enabled by powerful servers in the cloud, we can help you achieve your vision. We enable the smartness in your products with our PowerVR Neural Network Accelerators (NNA) and GPUs. 
Our NC-SDK enables seamless deployment of AI acceleration on either our hardware IP either in isolation or combined. Our NNA provides maximum efficiency with a scalable architecture which enables a wide range of smart edge and end point devices from low performance IoT to high performance RoboTaxi.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"CEVA\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/CEVA_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca href=\"https://www.ceva-dsp.com/app/deep-learning/\"\u003eDeep learning for the real-time embedded world\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eOne solution lies in supplying a dedicated low power AI processor for Deep Learning at the edge, combined with a deep neural network (DNN) graph compiler\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca name=\"Cadence\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Cadence_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca href=\"https://www.cadence.com/en_US/home/tools/ip/tensilica-ip/tensilica-ai-platform.html\"\u003eTensilica AI Platform\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca name=\"VeriSilicon\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003cimg 
src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/VeriSilicon_logo.png\" height=\"40\"\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca href=\"https://www.verisilicon.com/en/IPPortfolio/VivanteNPUIP\"\u003eVivante® NPU IP\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eVeriSilicon's Neural Network Processor (NPU) IP is a highly scalable, programmable computer vision and artificial intelligence processor that supports AI operations upgrades for endpoints, edge devices, and cloud devices. Designed to meet a variety of chip sizes and power budgets, the Vivante NPU IP is a cost-effective, high-quality neural network acceleration engine solution.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003e\u003ca name=\"Startups\"\u003e\u003c/a\u003eIV. Startups\u003c/h2\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cHR\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Cerebras\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.cerebras.net/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Cerebras_logo.png\" height=\"50\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.cerebras.net/press-release/cerebras-unveils-andromeda-a-13.5-million-core-ai-supercomputer-that-delivers-near-perfect-linear-scaling-for-large-language-models\"\u003eCerebras Unveils Andromeda, a 13.5 Million Core AI Supercomputer that Delivers Near-Perfect Linear Scaling for Large Language 
Models\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eDelivering more than 1 Exaflop of AI compute and 120 Petaflops of dense compute, Andromeda is one of the largest AI supercomputers ever built, and is dead simple to use\u003c/p\u003e\n\u003c/blockquote\u003e \n\n\u003cp\u003e\u003ca href=\"https://www.cerebras.net/blog/cerebras-sets-record-for-largest-ai-models-ever-trained-on-single-device\"\u003eCerebras Sets Record for Largest AI Models Ever Trained on Single Device\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eWe are announcing the largest models ever trained on a single device. Using the Cerebras Software Platform (CSoft), our customers can easily train state-of-the-art GPT language models (such as GPT-3[i] and GPT-J[ii]) with up to 20 billion parameters on a single CS-2 system. Running on a single CS-2, these models take minutes to set up and users can quickly move between models with just a few keystrokes. With clusters of GPUs, this takes months of engineering work.\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca href=\"https://www.anandtech.com/show/17061/cerebras-completes-series-f-funding-another-250m-for-4b-valuation\"\u003eCerebras Completes Series F Funding, Another $250M for $4B Valuation\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe new Series F funding round nets the company another $250m in capital, bringing the total raised through venture capital up to $720 million.\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca href=\"https://www.anandtech.com/show/16626/cerebras-unveils-wafer-scale-engine-two-wse2-26-trillion-transistors-100-yield\"\u003eCerebras Unveils Wafer Scale Engine Two (WSE2): 2.6 Trillion Transistors, 100% Yield\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eTwo years ago Cerebras unveiled a revolution in silicon design: a processor as big as your head, using as much area on a 12-inch wafer 
as a rectangular design would allow, built on 16nm, focused on both AI as well as HPC workloads. Today the company is launching its second generation product, built on TSMC 7nm, with more than double the cores and more than double of everything.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://techcrunch.com/2019/11/19/the-cerebras-cs-1-computes-deep-learning-ai-problems-by-being-bigger-bigger-and-bigger-than-any-other-chip/\"\u003eThe Cerebras CS-1 computes deep learning AI problems by being bigger, bigger, and bigger than any other chip\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eToday, the company announced the launch of its end-user compute product, the Cerebras CS-1, and also announced its first customer of Argonne National Laboratory.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Graphcore\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.graphcore.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Graphcore_logo.png\" height=\"70\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetimes.com/graphcore-supercharges-ipu-with-wafer-on-wafer/\"\u003eGraphcore Supercharges IPU with Wafer-on-Wafer\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eGraphcore unveiled its third-generation intelligence processing unit (IPU), the first processor to be built using 3D wafer-on-wafer (WoW) technology.\u003c/p\u003e\n\u003c/blockquote\u003e\n \n\u003cp\u003e\u003ca href=\"https://www.graphcore.ai/mk2-benchmarks\"\u003eMK2 PERFORMANCE BENCHMARKS\u003c/a\u003e\u003c/p\u003e\n\n\u003cp\u003e\u003ca href=\"https://techcrunch.com/2020/02/24/graphcore-the-ai-chipmaker-raises-another-150m-at-a-1-95b-valuation/\"\u003eGraphcore, the AI 
chipmaker, raises another $150M at a $1.95B valuation\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eGraphcore, the Bristol-based startup that designs processors specifically for artificial intelligence applications, announced it has raised another $150 million in funding for R\u0026D and to continue bringing on new customers. It’s valuation is now $1.95 billion.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://mp.weixin.qq.com/s/CH9h8dUtoNK_2ZfkK5YU0g\"\u003e解密又一个xPU：Graphcore的IPU\u003c/a\u003e give some analysis on its IPU architecture.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://mp.weixin.qq.com/s/AMuqeaShqEv3DnibH3scEA\"\u003eGraphcore AI芯片：更多分析\u003c/a\u003e More analysis.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://mp.weixin.qq.com/s/qP0zsSA7SQWXDqWGEAXmOg\"\u003e深度剖析AI芯片初创公司Graphcore的IPU\u003c/a\u003e In-depth analysis after more information was disclosed.\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Tenstorrent\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://tenstorrent.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Tenstorrent_logo.png\" height=\"100\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.prnewswire.com/news-releases/tenstorrent-raises-over-200-million-at-1-billion-valuation-to-create-programmable-high-performance-ai-computers-301295913.html\"\u003eTenstorrent Raises over $200 million at $1 billion Valuation to Create Programmable, High Performance AI Computers\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eTORONTO, May 20, 2021 /PRNewswire/ - Tenstorrent, a hardware start-up developing 
next generation computers, announced today that it has raised over $200 million in a recent funding round that values the company at $1 billion. The round was led by Fidelity Management and Research Company and includes additional investments from Eclipse Ventures, Epic CG and Moore Capital. \u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca href=\"https://www.anandtech.com/show/16709/an-interview-with-tenstorrent-ceo-ljubisa-bajic-and-cto-jim-keller\"\u003eAn Interview with Tenstorrent: CEO Ljubisa Bajic and CTO Jim Keller\u003c/a\u003e\u003c/p\u003e\n \n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Blaize\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.blaize.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Blaize_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.eetimes.com/automotive-ai-startup-blaize-closes-71-million-funding-round/\"\u003eAutomotive AI Startup Blaize Closes $71 Million Funding Round\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eBlaize, formerly ThinCI, has closed a Series D round of funding at $71 million. New investor Franklin Templeton and existing investor Temasek led the round, along with participation from Denso and other new and existing investors. 
This round brings Blaize’s total funding to around $155 million total.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Koniku\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://koniku.io/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Koniku_logo.png\" height=\"50\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eFounded in 2014, Newark, California startup \u003ca href=\"http://koniku.io/\"\u003eKoniku\u003c/a\u003e has taken in $1.65 million in funding so far to become “the world’s first neurocomputation company“. The idea is that since the brain is the most powerful computer ever devised, why not reverse engineer it? Simple, right? Koniku is actually integrating biological neurons onto chips and has made enough progress that they claim to have AstraZeneca as a customer. Boeing has also signed on with a letter of intent to use the technology in chemical-detecting drones.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Adapteva\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://www.adapteva.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Adapteva_logo.png\" height=\"70\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"http://www.adapteva.com/\"\u003eAdapteva\u003c/a\u003e has taken in $5.1 million in funding from investors that include mobile giant Ericsson. 
\u003ca href=\"http://www.parallella.org/docs/e5_1024core_soc.pdf\"\u003eThe paper \"Epiphany-V: A 1024 processor 64-bit RISC System-On-Chip\"\u003c/a\u003e describes the design of Adapteva's 1024-core processor chip in 16nm FinFet technology. \u003c/p\u003e\n\n\u003cp\u003e\u003ca name=\"Mythic\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://mythic.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Mythic_logo.png\" height=\"20\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.linkedin.com/pulse/era-analog-compute-has-arrived-michael-b-henry/\"\u003eThe Era of Analog Compute has Arrived!\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eResNet-50 in our prototype analog AI processor. Production release will support 900-1000 fps and INT8 accuracy at 3W.\u003c/p\u003e\n\u003c/blockquote\u003e\n \n\u003cp\u003e\u003ca href=\"https://venturebeat.com/2021/06/07/mythic-launches-analog-ai-processor-that-consumes-10-times-less-power/\"\u003eMythic launches analog AI processor that consumes 10 times less power\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eAnalog AI processor company Mythic launched its M1076 Analog Matrix Processor today to provide low-power AI processing.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Brainchip\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://www.brainchipinc.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Brainchip_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca 
href=\"https://venturebeat.com/2022/01/18/brainchip-launches-neuromorphic-process-for-ai-at-the-edge/\"\u003eBrainChip launches neuromorphic process for AI at the edge\u003c/a\u003e \u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eBrainChip today announced the commercialization of its Akida neural networking processor. Aimed at a variety of edge and internet of things (IoT) applications, BrainChip claims to be the first commercial producer of neuromorphic AI chips, which could deliver benefits in ultra-low power and performance over conventional approaches.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Deepvision\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://deepvision.io/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Deepvision_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"AI Processor Chipmaker Deep Vision Raises $35 Million in Series B Funding\"\u003eAI Processor Chipmaker Deep Vision Raises $35 Million in Series B Funding\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eTiger Global Leads Series B Financing, Enabling Deep Vision to Expand Video Analytics and Natural Language Processing Capabilities in Edge Computing Applications\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Groq\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003e\u003ca href=\"http://groq.com/\"\u003eGroq\u003c/a\u003e\u003c/h2\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca 
href=\"https://www.eetimes.com/groq-demos-fast-llms-on-4-year-old-silicon/\"\u003eGroq Demonstrates Fast LLMs on 4-Year-Old Silicon\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eMOUNTAIN VIEW, CALIF. — Groq has repositioned its first-generation AI inference chip as a language processing unit (LPU), and demonstrated Meta’s Llama-2 70-billion–parameter large language model (LLM) running inference at 240 tokens per second per user. Groq CEO Jonathan Ross told EE Times that the company had Llama-2 up and running on the company’s 10-rack (64-chip) cloud-based dev system in “a couple of days.” This system is based on the company’s first gen AI silicon, released four years ago.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.forbes.com/sites/amyfeldman/2021/04/14/ai-chip-startup-groq-founded-by-ex-googlers-raises-300-million-to-power-autonomous-vehicles-and-data-centers/\"\u003eAI Chip Startup Groq, Founded By Ex-Googlers, Raises $300 Million To Power Autonomous Vehicles And Data Centers\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eJonathan Ross left Google to launch next-generation semiconductor startup Groq in 2016. Today, the Mountain View, California-based firm said that it had raised $300 million led by Tiger Global Management and billionaire investor Dan Sundheim’s D1 Capital as it officially launched into public view. 
\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Kneron\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://www.kneron.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Kneron_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.prnewswire.com/news-releases/kneron-to-accelerate-edge-ai-development-with-more-than-10-million-usd-series-a-financing-300556674.html\"\u003eKneron to Accelerate Edge AI Development with more than 10 Million USD Series A Financing\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"GTI\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.gyrfalcontech.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/GTI_Logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003eAccording to this article, \u003ca href=\"https://www.prnewswire.com/news-releases/gyrfalcon-offers-automotive-ai-chip-technology-300860069.html\"\u003e\"Gyrfalcon offers Automotive AI Chip Technology\"\u003c/a\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eGyrfalcon Technology Inc. (GTI), has been promoting matrix-based application specific chips for all forms of AI since offering their production versions of AI accelerator chips in September 2017. 
Through the licensing of its proprietary technology, the company is confident it can help automakers bring highly competitive AI chips to production for use in vehicles within 18 months, along with significant gains in AI performance, improvements in power dissipation and cost advantages.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"SambaNova\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://sambanovasystems.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/SambaNova_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://venturebeat.com/ai/sambanova-unveils-new-ai-chip-to-power-full-stack-ai-platform/\"\u003eSambaNova unveils new AI chip to power full-stack AI platform\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eToday Palo-Alto-based SambaNova Systems unveiled a new AI chip, the SN40L, which will power its full-stack large language model (LLM) platform, the SambaNova Suite, that helps enterprises go from chip to model — building and deploying customized generative AI models.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://techcrunch.com/2021/04/13/sambanova-raises-676m-at-a-5-1b-valuation-to-double-down-on-cloud-based-ai-software-for-enterprises/\"\u003eSambaNova raises $676M at a $5.1B valuation to double down on cloud-based AI software for enterprises\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eSambaNova — a startup building AI hardware and integrated systems that run on it that only officially came out of three years in stealth last December — is announcing a huge round of funding today to take its business out into the 
world. The company has closed on $676 million in financing, a Series D that co-founder and CEO Rodrigo Liang has confirmed values the company at $5.1 billion.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://sambanova.ai/articles/introducing-sambanova-systems-datascale-a-new-era-of-computing/\"\u003eIntroducing SambaNova Systems DataScale: A New Era of Computing\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eSambaNova has been working closely with many organizations the past few months and has established a new state of the art in NLP. This advancement in NLP deep learning is illustrated by a GPU-crushing, world record performance result achieved on SambaNova Systems’ Dataflow-optimized system. \u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://sambanova.ai/a-new-state-of-the-art-in-nlp-beyond-gpus/\"\u003eA New State of the Art in NLP: Beyond GPUs\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eSambaNova has been working closely with many organizations the past few months and has established a new state of the art in NLP. This advancement in NLP deep learning is illustrated by a GPU-crushing, world record performance result achieved on SambaNova Systems’ Dataflow-optimized system. 
\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca name=\"GreenWaves\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://greenwaves-technologies.com/en/greenwaves-technologies-2/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/GreenWaves_logo.png\" height=\"50\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003ca href=\"https://www.eetimes.eu/greenwaves-shows-off-advanced-audio-demos/\"\u003eGreenWaves Shows Off Advanced Audio Demos\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe Gap9 processor, a successor to Gap8 which targets computer vision in IoT devices, is an ultra-low power neural network processor suitable for battery-powered devices. GreenWaves’ vice president of marketing Martin Croome told EE Times Europe that the company decided to focus Gap9 on the hearables market after receiving traction from this sector for Gap8.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Lightelligence\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.lightelligence.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Lightelligence_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetimes.com/optical-computing-chip-runs-hardest-math-problems-100x-faster-than-gpus/\"\u003eOptical Chip Solves Hardest Math Problems Faster than GPUs\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eOptical computing 
startup Lightelligence has demonstrated a silicon photonics accelerator running the Ising problem more than 100 times faster than a typical GPU setup.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Lightmatter\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.lightmatter.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Lightmatter_logo.png\" height=\"50\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.eetimes.com/lightmatter-raises-more-funding-for-photonic-ai-chip/\"\u003eLightmatter Raises More Funding for Photonic AI Chip\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eLightmatter, the MIT spinout building AI accelerators with a silicon photonics computing engine, announced a Series B funding round, raising an additional $80 million. 
The company’s technology is based on proprietary silicon photonics technology which manipulates coherent light inside a chip to perform calculations very quickly while using very little power\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Hailo\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.hailotech.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Hailo_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.eetimes.com/unicorn-ai-chipmaker-hailo-raises-136-million/\"\u003e‘Unicorn’ AI Chipmaker Hailo Raises $136 Million\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eIsraeli AI chip startup Hailo has raised $136 million in a Series C funding round, bringing the company’s total to $224 million. The company has also reportedly reached “unicorn” status.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Tachyum\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://www.tachyum.com\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Tachyum_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.hpcwire.com/off-the-wire/tachyum-launches-prodigy-universal-processor/\"\u003eTachyum Launches Prodigy Universal Processor\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eMay 11, 2021 — Tachyum today launched the world’s first universal processor, Prodigy, which unifies the functionality of a CPU, GPU and TPU in a single processor, creating a homogeneous architecture, 
while delivering massive performance improvements at a cost many times less than competing products\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Alphaics\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.alphaics.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Alphaics_logo.png\" height=\"50\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.eetimes.com/alphaics-begins-sampling-its-deep-learning-co-processor/\"\u003eAlphaICs Begins Sampling Its Deep Learning Co-Processor\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eAlphaICs, a startup developing edge AI and learning silicon aimed at smart vision applications, is sampling its deep learning co-processor, Gluon, that also comes with a software development kit.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Syntiant\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.syntiant.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Syntiant_logo.png\" height=\"30\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://semiengineering.com/syntiant-analog-deep-learning-chips/\"\u003eSyntiant: Analog Deep Learning Chips\u003c/a\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eStartup Syntiant Corp. is an Irvine, Calif. 
semiconductor company led by former top Broadcom engineers with experience in both innovative design and in producing chips designed to be produced in the billions, according to company CEO Kurt Busch.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"aiCTX\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://aictx.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/aiCTX_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetimes.com/document.asp?doc_id=1333983\"\u003eBaidu Backs Neuromorphic IC Developer\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eMUNICH — Swiss startup aiCTX has closed a $1.5 million pre-A funding round from Baidu Ventures to develop commercial applications for its low-power neuromorphic computing and processor designs and enable what it calls “neuromorphic intelligence.” It is targeting low-power edge-computing embedded sensory processing systems.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Flexlogix\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://www.flex-logix.com/nmax\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/flexlogix_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.zdnet.com/article/flex-logix-has-two-paths-to-making-a-lot-of-money-challenging-nvidia-in-ai/\"\u003eFlex Logix has two paths to making a lot of money challenging Nvidia in 
AI\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eThe programmable chip company scores $55 million in venture backing, bringing its total haul to $82 million\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"PFN\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://projects.preferred.jp/mn-core/en/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/PFN_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.preferred-networks.jp/en/news\"\u003ePreferred Networks develops a custom deep learning processor MN-Core for use in MN-3, a new large-scale cluster, in spring 2020\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eDec. 12, 2018, Tokyo Japan – Preferred Networks, Inc. (“PFN”, Head Office: Tokyo, President \u0026 CEO: Toru Nishikawa) announces that it is developing MN-Core (TM), a processor dedicated to deep learning and will exhibit this independently developed hardware for deep learning, including the MN-Core chip, board, and server, at the SEMICON Japan 2018, held at Tokyo Big Site. 
\n \u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Cornami\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://cornami.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Cornami_logo.jpg\" height=\"30\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.zdnet.com/article/ai-startup-cornami-reveals-details-of-neural-net-chip/\"\u003eAI Startup Cornami reveals details of neural net chip\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eStealth startup Cornami on Thursday revealed some details of its novel approach to chip design to run neural networks. CTO Paul Masters says the chip will finally realize the best aspects of a technology first seen in the 1970s. \n \u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Anaflash\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://anaflash.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Anaflash_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.smart2zero.com/news/ai-chip-startup-offers-new-edge-computing-solution\"\u003eAI chip startup offers new edge computing solution\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eAnaflash Inc. (San Jose, CA) is a startup company that has developed a test chip to demonstrate analog neurocomputing taking place inside logic-compatible embedded flash memory. 
\n \u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Optalysys\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.optalysys.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Optalysys_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.globenewswire.com/news-release/2019/03/07/1749510/0/en/Optalysys-launches-world-s-first-commercial-optical-processing-system-the-FT-X-2000.html\"\u003eOptalysys launches world’s first commercial optical processing system, the FT:X 2000\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eOptalysys develops Optical Co-processing technology which enables new levels of processing capability delivered with a vastly reduced energy consumption compared with conventional computers. Its first coprocessor is based on an established diffractive optical approach that uses the photons of low-power laser light instead of conventional electricity and its electrons. This inherently parallel technology is highly scalable and is the new paradigm of computing. 
\n \u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"etacompute\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://etacompute.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/etacompute_logo.png\" height=\"80\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://spectrum.ieee.org/tech-talk/semiconductors/processors/lowpower-ai-startup-eta-compute-delivers-first-commercial-chips\"\u003eLow-Power AI Startup Eta Compute Delivers First Commercial Chips\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe firm pivoted away from riskier spiking neural networks using a new power management scheme\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://spectrum.ieee.org/tech-talk/semiconductors/processors/eta-compute-debuts-spiking-neural-network-chip-for-edge-ai\"\u003eEta Compute Debuts Spiking Neural Network Chip for Edge AI\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eChip can learn on its own and inference at 100-microwatt scale, says company at Arm TechCon.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Achronix\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.achronix.com/product/speedster7t/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Achronix_logo.png\" height=\"30\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetimes.com/document.asp?doc_id=1334717\"\u003eAchronix Rolls 7-nm FPGAs for AI\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eAchronix is back in the game of providing 
full-fledged FPGAs with a new high-end 7-nm family, joining the Gold Rush of silicon to accelerate deep learning. It aims to leverage novel design of its AI block, a new on-chip network, and use of GDDR6 memory to provide similar performance at a lower cost than larger rivals Intel and Xilinx.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Areanna\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://areanna-ai.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Areanna_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetimes.com/document.asp?doc_id=1334947#\"\u003eStartup Runs AI in Novel SRAM\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eAreanna is the latest example of an explosion of new architectures spawned by the rise of deep learning. The debut of a whole new approach to computing has fired imaginations of engineers around the industry hoping to be the next Hewlett and Packard.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Neuroblade\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.neuroblade.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Neuroblade_logo.png\" height=\"120\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetasia.com/news/article/NeuroBlade-Preps-Inference-Chip\"\u003eNeuroBlade Preps Inference Chip\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eAdd NeuroBlade to the dozens of startups working on AI silicon. 
The Israeli company just closed a $23 million Series A, led by the founder of Check Point Software and with participation from Intel Capital.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Luminous\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.luminouscomputing.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Luminous_logo.png\" height=\"90\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.technologyreview.com/s/613668/ai-chips-uses-optical-semiconductor-machine-learning/\"\u003eBill Gates just backed a chip startup that uses light to turbocharge AI\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eLuminous Computing has developed an optical microchip that runs AI models much faster than other semiconductors while using less power.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Efinix\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.efinixinc.com\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Efinix_logo.png\" height=\"25\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.zdnet.com/article/chip-startup-efinix-hopes-to-bootstrap-ai-efforts-in-iot/\"\u003eChip startup Efinix hopes to bootstrap AI efforts in IoT\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eSix-year-old startup Efinix has created an intriguing twist on the FPGA technology dominated by Intel and Xilinx; the company hopes its energy-efficient chips will bootstrap the market for embedded AI in the Internet of 
Things.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"AIstorm\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://aistorm.ai\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/AIstorm_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://venturebeat.com/2019/02/11/aistorm-raises-13-2-million-for-ai-edge-computing-chips/\"\u003eAIStorm raises $13.2 million for AI edge computing chips\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eDavid Schie, a former senior executive at Maxim, Micrel, and Semtech, thinks both markets are ripe for disruption. He — along with WSI, Toshiba, and Arm veterans Robert Barker, Andreas Sibrai, and Cesar Matias — in 2011 cofounded AIStorm, a San Jose-based artificial intelligence (AI) startup that develops chipsets that can directly process data from wearables, handsets, automotive devices, smart speakers, and other internet of things (IoT) devices. 
\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\n\u003cp\u003e\u003ca name=\"SiMa\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://sima.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/SiMa_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.businesswire.com/news/home/20200512005313/en/SiMa.ai-Raises-30-Million-Series-Investment-Led\"\u003eSiMa.ai Raises $30 Million in Series A Investment Round Led by Dell Technologies Capital\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eSAN JOSE, Calif.--(BUSINESS WIRE)--SiMa.ai, the company enabling high performance machine learning to go green, today announced its Machine Learning SoC (MLSoC) platform – the industry’s first unified solution to support traditional compute with high performance, lowest power, safe and secure machine learning inference. Delivering the highest frames per second per watt, SiMa.ai’s MLSoC is the first machine learning platform to break the 1000 FPS/W barrier for ResNet-501. In customer engagements, the company has demonstrated 10-30x improvement in FPS/W through its automated software flow across a wide range of embedded edge applications, over today’s competing solutions. 
The platform will provide machine learning solutions that range from 50 TOPs@5W to 200 TOPs@20W, delivering an industry first of 10 TOPs/W for high performance inference.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.businesswire.com/news/home/20191022005079/en/SiMa.ai%E2%84%A2-Introduces-MLSoC%E2%84%A2\"\u003eSiMa.ai™ Introduces MLSoC™ – First Machine Learning Platform to Break 1000 FPS/W Barrier with 10-30x Improvement over Alternative Solutions\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eSiMa.ai, the company enabling high performance machine learning to go green, today announced its Machine Learning SoC (MLSoC) platform – the industry’s first unified solution to support traditional compute with high performance, lowest power, safe and secure machine learning inference. Delivering the highest frames per second per watt, SiMa.ai’s MLSoC is the first machine learning platform to break the 1000 FPS/W barrier for ResNet-501. In customer engagements, the company has demonstrated 10-30x improvement in FPS/W through its automated software flow across a wide range of embedded edge applications, over today’s competing solutions. 
The platform will provide machine learning solutions that range from 50 TOPs@5W to 200 TOPs@20W, delivering an industry first of 10 TOPs/W for high performance inference.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Untether\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://untether.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Untether_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://venturebeat.com/2021/07/20/untether-ai-nabs-125m-for-ai-acceleration-chips/\"\u003eUntether AI nabs $125M for AI acceleration chips\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eUntether AI, a startup developing custom-built chips for AI inferencing workloads, today announced it has raised $125 million from Tracker Capital Management and Intel Capital. 
The round, which was oversubscribed and included participation from Canada Pension Plan Investment Board and Radical Ventures, will be used to support customer expansion.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\n\u003cp\u003e\u003ca name=\"GrAI\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.graimatterlabs.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/GrAI_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://venturebeat.com/2019/09/18/grai-matter-labs-reveals-neuronflow-technology-and-announces-graiflow-sdk/\"\u003eGrAI Matter Labs Reveals NeuronFlow Technology and Announces GrAIFlow SDK\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eGrAI Matter Labs (aka GML), a neuromorphic computing pioneer today revealed NeuronFlow – a new programmable processor technology – and announced an early access program to its GrAIFlow software development kit.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Rain\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://rain-neuromorphics.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Rain_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.crunchbase.com/organization/rain-neuromorphics\"\u003eRain Neuromorphics on Crunchbase\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eWe build artificial intelligence processors, inspired by the brain. 
Our mission is to enable brain-scale intelligence.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"ABR\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://appliedbrainresearch.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/ABR_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.crunchbase.com/organization/applied-brain-research\"\u003eApplied Brain Research on Crunchbase\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eABR makes the world's most advanced neuromorphic compiler, runtime and libraries for the emerging space of neuromorphic computing.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Xmos\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.xmos.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Xmos_logo.png\" height=\"40\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetimes.com/xmos-adapts-xcore-into-aiot-crossover-processor/\"\u003eXMOS adapts Xcore into AIoT ‘crossover processor’\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eEE Times exclusive! 
The new chip targets AI-powered voice interfaces in IoT devices — “the most important AI workload at the endpoint.”\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://venturebeat.com/2020/02/12/xmos-unveils-xcore-ai-a-powerful-chip-designed-for-ai-processing-at-the-edge/\"\u003eXMOS unveils Xcore.ai, a powerful chip designed for AI processing at the edge\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe latest xcore.ai is a crossover chip designed to deliver high-performance AI, digital signal processing, control, and input/output in a single device with prices from $1.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"DinoplusAI\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://dinoplus.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/DinoplusAI_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eWe design and produce AI processors and the software to run them in data centers. Our unique approach optimizes for inference with the focus on performance, power efficiency, and ease of use; and at the same time our approach enables cost-effective training. 
\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Furiosa\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.furiosa.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Furiosa_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eWe build high-performance AI inference coprocessors that can be seamlessly integrated into various computing platforms including data centers, servers, desktops, automobiles and robots. \u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Corerain\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://www.corerain.com/en\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Corerain_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cblockquote\u003e\n  \u003cp\u003eCorerain provides ultra-high performance AI acceleration chips and the world's first streaming engine-based AI development platform.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Perceive\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://perceive.io/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Perceive_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://venturebeat.com/2020/03/31/perceive-emerges-from-stealth-with-ergo-edge-ai-chip/\"\u003ePerceive emerges from stealth with Ergo edge AI chip\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eOn-device computing 
solutions startup Perceive emerged from stealth today with its first product: the Ergo edge processor for AI inference. CEO Steve Teig claims the chip, which is designed for consumer devices like security cameras, connected appliances, and mobile phones, delivers “breakthrough” accuracy and performance in its class.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"SimpleMachines\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.simplemachines.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/SimpleMachines_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.design-reuse.com/news/49012/simplemachines-ai-chip-tsmc-16nm.html\"\u003eSimpleMachines, Inc. Debuts First-of-its-Kind High Performance Chip\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eAs traditional chip makers struggle to embrace the challenges presented by the rapidly evolving AI software landscape, a San Jose startup has announced it has working silicon and a whole new future-proof chip paradigm to address these issues.\n\nThe SimpleMachines, Inc. (SMI) team – which includes leading research scientists and industry heavyweights formerly of Qualcomm, Intel and Sun Microsystems – has created a first-of-its-kind easily programmable, high-performance chip that will accelerate a wide variety of AI and machine-learning applications. 
\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Neureality\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.neureality.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Neureality_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://techcrunch.com/2022/12/06/neureality-ai-accelerator-chips-startup-raises-35m/\"\u003eNeuReality lands $35M to bring AI accelerator chips to market\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eNeuReality, a startup developing AI inferencing accelerator chips, has raised $35 million in new venture capital.\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.electronicsmedia.info/2021/05/06/neureality-unveiled-nr1-p-a-novel-ai-centric-inference-platform/\"\u003eNeuReality unveiled NR1-P, A novel AI-centric inference platform\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eNeuReality has unveiled NR1-P, a novel AI-centric inference platform. NeuReality has already started demonstrating its AI-centric platform to customers and partners. NeuReality has redefined today’s outdated AI system architecture by developing an AI-centric inference platform based on a new type of System-on-Chip (SoC). 
\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca href=\"https://techcrunch.com/2021/02/10/neureality-raises-8m-for-its-novel-ai-inferencing-platform/\"\u003eNeuReality raises $8M for its novel AI inferencing platform\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eNeuReality, an Israeli AI hardware startup that is working on a novel approach to improving AI inferencing platforms by doing away with the current CPU-centric model, is coming out of stealth today and announcing an $8 million seed round. \u003c/p\u003e\n\u003c/blockquote\u003e\n \n\u003cp\u003e\u003ca name=\"AnalogInference\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.analog-inference.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/AnalogInference_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eenewsanalog.com/news/analog-inference-startup-raises-106-million\"\u003eAnalog inference startup raises $10.6 million\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe company is backed by Khosla Ventures and is developing its first generation of products for AI computing at the edge. 
The company raised $4.5 million shortly after its formation in March 2018, so the latest tranche brings the total raised to-date to $15.1 million\u003c/p\u003e\n\u003c/blockquote\u003e\n\n\u003cp\u003e\u003ca name=\"Quadric\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.quadric.io/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Quatric_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.hpcwire.com/off-the-wire/quadric-announces-unified-silicon-and-software-platform-optimized-for-on-device-ai/\"\u003eQuadric Announces Unified Silicon and Software Platform Optimized for On-Device AI\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eBURLINGAME, Calif., June 22, 2021 — Quadric (quadric.io), an innovator in high-performance edge processing, has introduced a unified silicon and software platform that unlocks the power of on-device AI. 
\u003c/p\u003e\n\u003c/blockquote\u003e \n\n\u003cp\u003e\u003ca name=\"EdgeQ\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://edgeq.io/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/EdgeQ_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://techcrunch.com/2021/01/26/edgeq-reveals-more-details-behind-its-next-gen-5g-ai-chip/\"\u003eEdgeQ reveals more details behind its next-gen 5G/AI chip\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003e5G is the current revolution in wireless technology, and every chip company old and new is trying to burrow their way into this ultra-competitive — but extremely lucrative — market. One of the most interesting new players in the space is EdgeQ, a startup with a strong technical pedigree via Qualcomm that we covered last year after it raised a nearly $40 million Series A.\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca name=\"Innatera\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"http://www.innatera.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Innatera_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetimes.com/innatera-unveils-neuromorphic-ai-chip-to-accelerate-spiking-networks/\"\u003eInnatera Unveils Neuromorphic AI Chip to Accelerate Spiking Networks\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eInnatera, the Dutch startup making neuromorphic AI accelerators for spiking neural networks, has produced its first chips, gauged their performance, and 
revealed details of their architecture.\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca name=\"Ceremorphic\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://ceremorphic.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Ceremorphic_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.eetimes.com/redpine-founder-launches-ai-processor-startup/\"\u003eRedpine Founder Launches AI Processor Startup\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eCeremorphic, an AI chip startup emerging from stealth mode this week, is readying a heterogeneous AI processor aimed at model training in data centers, automotive, high-performance computing, robotics and other emerging applications.\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca name=\"Aspinity\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.aspinity.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Aspinity_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://embeddedcomputing.com/technology/analog-and-power/analog-semicundoctors-sensors/aspinity-analog-ml-chip-allows-battery-powered-always-on\"\u003eAspinity Analog ML Chip Allows Battery-Powered “Always On”\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eMachine learning (ML) is all about massive amounts of processing, DSP, etc., right? Maybe not, according to the team at Aspinity. The company continues to push ahead on the analog front. 
The latest member of the company’s analogML family, the AML100, operates completely in the analog domain. As a result, it can reduce always-on system power by 95% (for the record, we had to walk through this a couple of times before I believed them).\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca name=\"Teramem\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.tetramem.com/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/Teramem_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"https://www.tetramem.com/posts/TetraMem-Technology-Debut-at-Linley\"\u003eTetraMem enjoyed an exciting public debut of our analog in-memory compute technology at the Linley Spring 2022 Processor Conference.\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003e\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cp\u003e\u003ca name=\"d-matrix\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv align=\"center\"\u003e\u003ca href=\"https://www.d-matrix.ai/\"\u003e\u003cimg src=\"https://github.com/basicmi/Deep-Learning-Processor-List/raw/master/resource/d-matrix_logo.png\" height=\"60\"\u003e\u003c/a\u003e\u003c/div\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003ca href=\"www.reuters.com/technology/ai-chip-startup-d-matrix-raises-110-mln-with-backing-microsoft-2023-09-06/\"\u003eExclusive: AI chip startup d-Matrix raises $110 million with backing from Microsoft\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eSept 6 (Reuters) - Silicon Valley-based artificial intelligence chip startup d-Matrix has raised $110 million from investors that include Microsoft Corp (MSFT.O) at a time when many chip companies 
are struggling to raise cash.\u003c/p\u003e\n\u003c/blockquote\u003e \n\n\u003cp\u003e\u003ca href=\"https://www.forbes.com/sites/karlfreund/2022/06/21/d-matrix-ai-chip-promises-efficient-transformer-processing/\"\u003eD-Matrix AI chip promises efficient transformer processing\u003c/a\u003e\u003c/p\u003e\n\u003cblockquote\u003e\n  \u003cp\u003eThe startup combines digital in-memory compute and chiplet implementations for data-center-grade inference.\u003c/p\u003e\n\u003c/blockquote\u003e \n \n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"AIChipCompilers\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003eAI Chip Compilers\u003c/h2\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cHR\u003e\n1. \u003ca href=\"https://github.com/pytorch/glow\"\u003epytorch/glow\u003c/a\u003e\u003cbr\u003e\n2. \u003ca href=\"https://tvm.ai/\"\u003eTVM:End to End Deep Learning Compiler Stack\u003c/a\u003e\u003cbr\u003e\n3. \u003ca href=\"https://www.tensorflow.org/xla\"\u003eGoogle Tensorflow XLA\u003c/a\u003e\u003cbr\u003e\n4. \u003ca href=\"https://developer.nvidia.com/tensorrt\"\u003eNvidia TensorRT\u003c/a\u003e\u003cbr\u003e\n5. \u003ca href=\"https://github.com/plaidml/plaidml\"\u003ePlaidML\u003c/a\u003e\u003cbr\u003e\n6. \u003ca href=\"https://github.com/NervanaSystems/ngraph\"\u003enGraph\u003c/a\u003e\u003cbr\u003e\n7. \u003ca href=\"https://github.com/Tiramisu-Compiler/tiramisu\"\u003eMIT Tiramisu compiler\u003c/a\u003e\u003cbr\u003e\n8. \u003ca href=\"https://onnc.ai/\"\u003eONNC (Open Neural Network Compiler)\u003c/a\u003e\u003cbr\u003e\n9. \u003ca href=\"https://mlir.llvm.org/\"\u003eMLIR: Multi-Level Intermediate Representation\u003c/a\u003e\u003cbr\u003e\n10. \u003ca href=\"http://tensor-compiler.org/\"\u003eThe Tensor Algebra Compiler (taco)\u003c/a\u003e\u003cbr\u003e\n11. 
\u003ca href=\"https://facebookresearch.github.io/TensorComprehensions/\"\u003eTensor Comprehensions\u003c/a\u003e\u003cbr\u003e\n12. \u003ca href=\"https://www.polymagelabs.com//\"\u003ePolyMage Labs\u003c/a\u003e\u003cbr\u003e\n13. \u003ca href=\"https://octoml.ai/\"\u003eOctoML\u003c/a\u003e\u003cbr\u003e\n14. \u003ca href=\"https://www.modular.com/\"\u003eModular AI\u003c/a\u003e\u003cbr\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"AIChipBenchmarks\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003eAI Chip Benchmarks\u003c/h2\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cHR\u003e\n\n1. \u003ca href=\"https://dawn.cs.stanford.edu/benchmark/index.html\"\u003eDAWNBench:An End-to-End Deep Learning Benchmark and Competition Image Classification (ImageNet)\u003c/a\u003e\u003cbr\u003e\n2. \u003ca href=\"https://github.com/rdadolf/fathom\"\u003eFathom:Reference workloads for modern deep learning methods\u003c/a\u003e\u003cbr\u003e\n3. \u003ca href=\"https://mlperf.org/\"\u003eMLPerf:A broad ML benchmark suite for measuring performance of ML software frameworks, ML hardware accelerators, and ML cloud platforms\u003c/a\u003e. \n\u003cstrong\u003eYou can find latest MLPerf results: training 2.1, HPC 2.0, inference tiny 1.0 \u003ca href=\"https://mlcommons.org/en/news/mlperf-training-4q2022/\"\u003ehere.\u003c/a\u003e\u003c/strong\u003e. \u003cbr\u003e\n\u003cstrong\u003eYou can find MLPerf inference results v2.1 \u003ca href=\"https://mlcommons.org/en/news/mlperf-inference-v21/\"\u003ehere.\u003c/a\u003e\u003c/strong\u003e. \u003cbr\u003e\n\u003cstrong\u003eYou can find MLPerf training results v1.0 \u003ca href=\"https://mlcommons.org/en/news/mlperf-training-2q2022/\"\u003ehere.\u003c/a\u003e\u003c/strong\u003e. \u003cbr\u003e\n\n4. \u003ca href=\"https://aimatrix.ai/en-us/index.html\"\u003eAI Matrix\u003c/a\u003e\u003cbr\u003e\n5. 
\u003ca href=\"http://ai-benchmark.com/index.html\"\u003eAI-Benchmark\u003c/a\u003e\u003cbr\u003e\n6. \u003ca href=\"https://github.com/AIIABenchmark/AIIA-DNN-benchmark\"\u003eAIIABenchmark\u003c/a\u003e\u003cbr\u003e\n7. \u003ca href=\"https://www.eembc.org/mlmark/\"\u003eEEMBC MLMark Benchmark\u003c/a\u003e\u003cbr\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003ca name=\"Reference\"\u003e\u003c/a\u003e\u003c/p\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch2\u003eReference\u003c/h2\u003e\u003c/div\u003e\n\n\u003cp\u003e\u003cHR\u003e\n\n\u003cdiv align=\"center\"\u003e\u003ch3\u003e \u003c/h3\u003e\u003c/div\u003e\n      \n1. \u003ca href=\"https://meanderful.blogspot.jp/2017/06/fpgas-and-ai-processors-dnn-and-cnn-for.html\"\u003eFPGAs and AI processors: DNN and CNN for all\u003c/a\u003e\u003cbr\u003e\n2. \u003ca href=\"http://www.nanalyze.com/2017/05/12-ai-hardware-startups-new-ai-chips/\"\u003e12 AI Hardware Startups Building New AI Chips\u003c/a\u003e\u003cbr\u003e\n3. \u003ca href=\"http://eyeriss.mit.edu/tutorial.html\"\u003eTutorial on Hardware Architectures for Deep Neural Networks\u003c/a\u003e\u003cbr\u003e\n4. \u003cstrong\u003e\u003ca href=\"https://nicsefc.ee.tsinghua.edu.cn/projects/neural-network-accelerator/\"\u003eNeural Network Accelerator Comparison\u003c/a\u003e\u003c/strong\u003e\u003cbr\u003e\n5. \"White Paper on AI Chip Technologies 2018\". You can download it from \u003ca href=\"https://cloud.tsinghua.edu.cn/f/9aa0a4f0a5684cc48495/?dl=1\"\u003ehere\u003c/a\u003e, or \u003ca href=\"https://drive.google.com/open?id=1ieDm0bpjVWl5MnSESRs92EcmoSzG5vcm\"\u003eGoogle drive.\u003c/a\u003e\u003cbr\u003e\n5. \u003cstrong\u003e\"What We Talk About When We Talk About AI Chip\". 
\u003ca href=\"https://mp.weixin.qq.com/s/SbX5yz5d3GXaLcl15DO6OQ\"\u003e#1\u003c/a\u003e,  \u003ca href=\"https://mp.weixin.qq.com/s/zvgDgKpIMIRLFUEW0fFOeg\"\u003e#2\u003c/a\u003e,  \u003ca href=\"https://mp.weixin.qq.com/s/CKHs5yblcMur4h2BwUBICw\"\u003e#3\u003c/a\u003e,  \u003ca href=\"https://mp.weixin.qq.com/s/hFnHhaWWYTFRUsD3HlMbLw\"\u003e#4\u003c/a\u003e\u003c/strong\u003e\u003cbr\u003e\n6. \u003cstrong\u003e\u003ca href=\"https://birenresearch.github.io/AIChip_Paper_List/\"\u003eAI Chip Paper List\u003c/a\u003e\u003c/strong\u003e\u003cbr\u003e\n7. \u003cstrong\u003e\u003ca href=\"https://khairy2011.medium.com/tpu-vs-gpu-vs-cerebras-vs-graphcore-a-fair-comparison-between-ml-hardware-3f5a19d89e38\"\u003eTPU vs GPU vs Cerebras vs Graphcore: A Fair Comparison between ML Hardware\u003c/a\u003e\u003c/strong\u003e\u003cbr\u003e\n\n\u003cdiv align=\"center\"\u003e\n\u003ca href=\"http://www.reliablecounter.com\" target=\"_blank\"\u003e\u003cimg src=\"http://www.reliablecounter.com/count.php?page=https://basicmi.github.io/Deep-Learning-Processor-List/\u0026digit=style/plain/3/\u0026reloads=1\" alt=\"laptop\" title=\"laptop\" border=\"0\"\u003e\u003c/a\u003e\n\u003c/div\u003e\n","funding_links":[],"categories":["PHP","What is not covered here?","硬件_其他"],"sub_categories":["网络服务_其他"],"project_url":"https://awesome.ecosyste.ms/api/v1/projects/github.com%2Fbasicmi%2FAI-Chip","html_url":"https://awesome.ecosyste.ms/projects/github.com%2Fbasicmi%2FAI-Chip","lists_url":"https://awesome.ecosyste.ms/api/v1/projects/github.com%2Fbasicmi%2FAI-Chip/lists"}