{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"xFinder","owner":"IAAR-Shanghai","isFork":false,"description":"xFinder: Robust and Pinpoint Answer Extraction for Large Language Models","allTopics":["benchmark","regex","reliability","evaluation","dataset","gpt","large-language-models","llm","open-compass","lm-evaluation","xfinder","reliable-evaluation","key-answer-extraction"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":44,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T15:15:45.018Z"}},{"type":"Public","name":"PGRAG","owner":"IAAR-Shanghai","isFork":false,"description":"PGRAG","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":13,"forksCount":0,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,9,0,21,0,8,0,0,0,0,0,0,0,0,0,0,0,0,3,30],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T02:52:23.722Z"}},{"type":"Public","name":"NewsBench","owner":"IAAR-Shanghai","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":21,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-28T13:17:47.020Z"}},{"type":"Public","name":"UHGEval","owner":"IAAR-Shanghai","isFork":false,"description":"[ACL 2024] Benchmarking the Hallucination of Chinese Large Language Models via Unconstrained Generation","allTopics":["benchmark","framework","evaluation","dataset","hallucination","aquila","unconstrained","baichuan","gpt-3","hallucinations","gpt-4","large-language-models","llm","chatgpt","chatglm","internlm","qwen","hallucination-detection","truthfulqa","acl2024"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":162,"forksCount":17,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,26,4,7,0,3,0,3,5,2,37,2,0,0,0,8,1,2,17,7,1,0,0,0,0,0,0,0,1,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-27T11:51:54.044Z"}},{"type":"Public","name":"DATG","owner":"IAAR-Shanghai","isFork":false,"description":"[ACL 2024]Controlled Text Generation for Large Language Model with Dynamic Attribute Graphs","allTopics":["graph","pagerank","inference","text-generation","fudge","controlled-text-generation","large-language-models","llms","controllable-text-generation","preadd"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":22,"forksCount":2,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-23T04:53:14.643Z"}},{"type":"Public","name":"CRUD_RAG","owner":"IAAR-Shanghai","isFork":false,"description":"CRUD-RAG: A Comprehensive Chinese Benchmark for Retrieval-Augmented Generation of Large Language Models","allTopics":["benchmark","large-language-models","retrieval-augmented-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":136,"forksCount":15,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-26T01:32:54.922Z"}},{"type":"Public","name":"Grimoire","owner":"IAAR-Shanghai","isFork":false,"description":"Grimoire is All You Need for Enhancing Large Language Models","allTopics":["grimoire","llama","datasets","icl","phi2","baichuan","gpt-4","in-context-learning","llm","chatgpt"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":108,"forksCount":9,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,24,0,8,3,5,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-29T05:44:07.704Z"}},{"type":"Public","name":"UHGEval-dataset","owner":"IAAR-Shanghai","isFork":false,"description":"The full pipeline of creating UHGEval hallucination dataset","allTopics":["benchmark","pipeline","evaluation","dataset","unconstrained","hallucinations","large-language-models","llm","chatgpt","uhgeval"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-15T15:21:53.569Z"}}],"repositoryCount":8,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}