{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"Panda-70M","owner":"snap-research","isFork":false,"description":"[CVPR 2024] Panda-70M: Captioning 70M Videos with Multiple Cross-Modality Teachers","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":33,"starsCount":408,"forksCount":12,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,24,22,2,0,9,2,0,0,4,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T21:07:48.524Z"}},{"type":"Public","name":"graphless-neural-networks","owner":"snap-research","isFork":false,"description":"[ICLR 2022] Code for Graph-less Neural Networks: Teaching Old MLPs New Tricks via Distillation (GLNN)","allTopics":["scalability","knowledge-distillation","efficient-inference","distillation","graph-algorithm","gnn","deep-learning","pytorch","graph-neural-networks"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":80,"forksCount":20,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-03T03:43:10.113Z"}},{"type":"Public","name":"LargeGT","owner":"snap-research","isFork":false,"description":"Graph Transformers for Large Graphs","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":16,"forksCount":4,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,6,0,0,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-26T22:28:56.275Z"}},{"type":"Public","name":"MyVLM","owner":"snap-research","isFork":false,"description":"Official Implementation for \"MyVLM: Personalizing VLMs for User-Specific Queries\"","allTopics":["personalization","vision-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":129,"forksCount":6,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-26T03:13:11.391Z"}},{"type":"Public","name":"USE","owner":"snap-research","isFork":false,"description":"USE: Dynamic User Modeling with Stateful Sequence Models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-17T08:38:48.480Z"}},{"type":"Public","name":"MobileR2L","owner":"snap-research","isFork":false,"description":"[CVPR 2023] Real-Time Neural Light Field on Mobile Devices","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":190,"forksCount":13,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,5,0,0,0,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-03T04:35:47.821Z"}},{"type":"Public","name":"qfar","owner":"snap-research","isFork":false,"description":"Official implementation of MobiCom 2023 paper \"QfaR: Location-Guided Scanning of Visual Codes from Long 
Distances\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":2,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-19T15:51:16.083Z"}},{"type":"Public","name":"linkless-link-prediction","owner":"snap-research","isFork":false,"description":"[ICML 2023] Linkless Link Prediction via Relational Distillation","allTopics":["deep-learning","scalability","knowledge-distillation","link-prediction","efficient-inference","distillation","graph-neural-networks","gnn"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":15,"forksCount":6,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-04T18:01:21.560Z"}},{"type":"Public","name":"unsupervised-volumetric-animation","owner":"snap-research","isFork":false,"description":"The repository for paper Unsupervised Volumetric Animation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":66,"forksCount":1,"license":"Other","participation":[6,0,0,0,0,2,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-22T16:57:12.978Z"}},{"type":"Public","name":"R2L","owner":"snap-research","isFork":false,"description":"[ECCV 2022] R2L: Distilling Neural Radiance Field to Neural Light Field for Efficient Novel View Synthesis","allTopics":["rendering","mlp","nerf","distillation","novel-view-synthesis","neural-light-field","deep-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":185,"forksCount":23,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-15T04:59:36.973Z"}},{"type":"Public","name":"EfficientFormer","owner":"snap-research","isFork":false,"description":"EfficientFormerV2 [ICCV 2023] & EfficientFormer [NeurIPs 2022]","allTopics":["deep-learning","detection","transformers","pytorch","transformer","imagenet","semantic-segmentation","mobile-devices","efficient-inference","efficient-neural-networks"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":20,"starsCount":959,"forksCount":89,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-13T13:28:39.866Z"}},{"type":"Public","name":"discoscene","owner":"snap-research","isFork":false,"description":"CVPR 2023 Highlight: DiscoScene","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":137,"forksCount":2,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-01T06:35:34.286Z"}},{"type":"Public","name":"edit3d","owner":"snap-research","isFork":false,"description":"Code for Cross-Modal 3D Shape Generation and Manipulation (ECCV 2022)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":25,"forksCount":7,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-23T15:56:47.499Z"}},{"type":"Public","name":"3dgp","owner":"snap-research","isFork":false,"description":"3D generation on ImageNet [ICLR 
2023]","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":203,"forksCount":8,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-23T15:13:04.728Z"}},{"type":"Public","name":"NeROIC","owner":"snap-research","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":13,"starsCount":919,"forksCount":120,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-01-19T22:21:36.206Z"}},{"type":"Public","name":"non-contrastive-link-prediction","owner":"snap-research","isFork":false,"description":"[ICLR 2023] Link Prediction with Non-Contrastive Learning","allTopics":["scalability","link-prediction","self-supervision","graph-neural-networks","self-supervised-learning","gnn","efficient-training","non-contrastive-learning","deep-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":25,"forksCount":1,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-01-13T02:55:05.261Z"}},{"type":"Public","name":"MMVID","owner":"snap-research","isFork":false,"description":"[CVPR 2022] Show Me What and Tell Me How: Video Synthesis via Multimodal Conditioning","allTopics":["deep-learning","transformer","bert","multimodal-learning","video-generation","text-to-video","multimodal-video-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":190,"forksCount":21,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-06-16T18:12:47.516Z"}},{"type":"Public","name":"F8Net","owner":"snap-research","isFork":false,"description":"[ICLR 2022 Oral] F8Net: Fixed-Point 8-bit Only Multiplication for Network Quantization","allTopics":["deep-neural-networks","deep-learning","quantization","8-bit","efficient-neural-networks"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":95,"forksCount":14,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-05-05T17:03:01.129Z"}},{"type":"Public","name":"CAT","owner":"snap-research","isFork":false,"description":"[CVPR 2021] Teachers Do More Than Teach: Compressing Image-to-Image Models (CAT)","allTopics":["compression","pix2pix","image-to-image","cyclegan","gaugan","deep-learning","pytorch","gan"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":178,"forksCount":20,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-04-22T16:15:47.304Z"}},{"type":"Public","name":"MoCoGAN-HD","owner":"snap-research","isFork":false,"description":"[ICLR 2021 Spotlight] A Good Image Generator Is What You Need for High-Resolution Video Synthesis","allTopics":["deep-learning","video-generation","gan","image-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":238,"forksCount":25,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-02-04T02:33:37.991Z"}},{"type":"Public","name":"NeurT-FDR","owner":"snap-research","isFork":false,"description":"NeurT-FDR, a method for controlling false discovery rate by incorporating feature 
hierarchy","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-02-01T17:21:53.875Z"}},{"type":"Public","name":"arielai_youtube_3d_hands","owner":"snap-research","isFork":false,"description":"A dataset for 3D hand reconstruction in the wild.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":8,"starsCount":185,"forksCount":15,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2020-07-31T10:07:27.180Z"}}],"repositoryCount":22,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}