{"payload":{"header_redesign_enabled":false,"results":[{"id":"727470807","archived":false,"color":"#f34b7d","followers":946,"has_funding_file":false,"hl_name":"b4rtaz/distributed-llama","hl_trunc_description":"Tensor parallelism is all you need. Run LLMs on weak devices or make powerful devices even more powerful by distributing the workload and…","language":"C++","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":727470807,"name":"distributed-llama","owner_id":12797776,"owner_login":"b4rtaz","updated_at":"2024-06-07T23:14:20.353Z","has_issues":true}},"sponsorable":true,"topics":["neural-network","distributed-computing","llm","llms","open-llm","llm-inference","llama2","distributed-llm","llama3"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":86,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Ab4rtaz%252Fdistributed-llama%2B%2Blanguage%253AC%252B%252B","metadata":null,"csrf_tokens":{"/b4rtaz/distributed-llama/star":{"post":"WRly-fZivIXFEV3CXvCUWzO26ASRA1F0IciAiTbtFl8pmuD04YKcEHkT93zF_v2UogOLQEHUyZoVUi1NiIl3Zg"},"/b4rtaz/distributed-llama/unstar":{"post":"U721oLMP_ps3TTpdNt3aaJdgEpkSQIP2sq_DbP2DN5OtTZD1SnTSTN1idRI03ORbnDETz42_h0xz3eGJkow9mg"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"8osSyvClbUt92G_2kt3TAAXdzhMQ1ggHcuDxihugL2GGm-KQJtq3ddcMYSQNGKPf1lUxS5vpZ-L9AHZkmNM3RQ"}}},"title":"Repository search results"}