Dataset Groups Activity Stream HumanEvalFix

HumanEvalFix is a benchmark for evaluating the performance of code repair models.

BibTeX:

@dataset{Muennighoff_et_al_and_Luo_et_al_and_Zhang_et_al_and_Wang_et_al_2024,
  abstract      = {HumanEvalFix is a benchmark for evaluating the performance of code repair models.},
  author        = {Muennighoff and Luo and Zhang and Wang and others},
  internal-note = {NOTE(review): source export gave authors only as "X et al."; surnames are kept
                   with the literal "and others" token. Add full given names in "Last, First" form
                   (e.g. "Muennighoff, Niklas") once confirmed against the dataset record.},
  title         = {{HumanEvalFix}},
  doi           = {10.57702/het8jucq},
  keywords      = {Code Evaluation, Code Repair, HumanEvalFix},
  month         = dec,
  year          = {2024},
  publisher     = {TIB},
  url           = {https://service.tib.eu/ldmservice/dataset/humanevalfix},
}