{"payload":{"header_redesign_enabled":false,"results":[{"id":"701430060","archived":false,"color":"#3572A5","followers":220,"has_funding_file":false,"hl_name":"LLM-Tuning-Safety/LLMs-Finetuning-Safety","hl_trunc_description":"We jailbreak GPT-3.5 Turbo’s safety guardrails by fine-tuning it on only 10 adversarially designed examples, at a cost of less than $0.20…","language":"Python","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":701430060,"name":"LLMs-Finetuning-Safety","owner_id":146881603,"owner_login":"LLM-Tuning-Safety","updated_at":"2024-02-23T21:19:44.994Z","has_issues":true}},"sponsorable":false,"topics":["alignment","llm","llm-finetuning"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":89,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253ALLM-Tuning-Safety%252FLLMs-Finetuning-Safety%2B%2Blanguage%253APython","metadata":null,"warn_limited_results":false,"csrf_tokens":{"/LLM-Tuning-Safety/LLMs-Finetuning-Safety/star":{"post":"fcu3rEv6HkSqS96NamceUW8XeuhZQMsXLC5JysrjqItoyW9iBWGFfv6Y7MMMNg-oCm9_OyM4eI3i45Ee-uj4lw"},"/LLM-Tuning-Safety/LLMs-Finetuning-Safety/unstar":{"post":"8M_QrRq6hHkFIRB9a0N152YgHzPycxdPDgexw_LCIwICZPZHVzgQolmKRmVkSyTaaTDe4085rfHS0hNr2fkV6w"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"voFHK56SHEekLBYQaGG4MnnpkQgrb0ZLzbyaUWkcsnd9b9Cg5O7vu_3nL833Eh31XgxrmZTp1xUEEuxDOxOOUg"}}},"title":"Repository search results"}