Changes
On December 16, 2024 at 7:39:37 PM UTC, admin:

- Changed value of field doi_status to True in APTQ: Attention-aware Post-Training Mixed-Precision Quantization for Large Language Models
- Changed value of field doi_date_published to 2024-12-16 in APTQ: Attention-aware Post-Training Mixed-Precision Quantization for Large Language Models
- Added resource Original Metadata to APTQ: Attention-aware Post-Training Mixed-Precision Quantization for Large Language Models
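Since this record follows the standard CKAN package structure, the updated field values can be checked over HTTP. A minimal sketch, assuming this service exposes the usual CKAN Action API under the "domain" shown in the metadata below (the endpoint's availability is an assumption, not confirmed by this page); the base URL and package id are taken from the record itself:

import requests

# Values taken from the metadata record shown below.
BASE = "https://service.tib.eu/ldmservice"
PACKAGE_ID = "841ee5fe-c944-41fd-a0ea-682c15c8f360"

# package_show is the standard CKAN Action API call for reading a dataset;
# its presence on this particular service is assumed.
resp = requests.get(
    f"{BASE}/api/3/action/package_show",
    params={"id": PACKAGE_ID},
    timeout=30,
)
resp.raise_for_status()
pkg = resp.json()["result"]

print(pkg["doi_status"])          # expected after this change: True
print(pkg["doi_date_published"])  # expected after this change: 2024-12-16
print(len(pkg["resources"]))      # expected after this change: 1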
The corresponding change to the package metadata, shown as a diff (removed lines prefixed with -, added lines with +):

{
    "access_rights": "",
    "author": "Ziyi Guan",
    "author_email": "",
    "citation": [],
    "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700",
    "defined_in": "https://doi.org/10.1145/3649329.3658498",
    "doi": "10.57702/i1dmoyg7",
-   "doi_date_published": null,
+   "doi_date_published": "2024-12-16",
    "doi_publisher": "TIB",
-   "doi_status": false,
+   "doi_status": true,
    "domain": "https://service.tib.eu/ldmservice",
    "extra_authors": [
        {
            "extra_author": "Hantao Huang",
            "orcid": ""
        },
        {
            "extra_author": "Yupeng Su",
            "orcid": ""
        },
        {
            "extra_author": "Hong Huang",
            "orcid": ""
        },
        {
            "extra_author": "Ngai Wong",
            "orcid": ""
        },
        {
            "extra_author": "Hao Yu",
            "orcid": ""
        }
    ],
    "groups": [
        {
            "description": "",
            "display_name": "Large Language Models",
            "id": "246be871-ce2b-4021-8290-df0f88dec833",
            "image_display_url": "",
            "name": "large-language-models",
            "title": "Large Language Models"
        },
        {
            "description": "",
            "display_name": "Natural Language Processing",
            "id": "5b974bcc-8f79-40fc-a05d-75b861d55ed0",
            "image_display_url": "",
            "name": "natural-language-processing",
            "title": "Natural Language Processing"
        },
        {
            "description": "",
            "display_name": "Quantization",
            "id": "ec002ec5-12d1-41bf-8875-df02c84bdb18",
            "image_display_url": "",
            "name": "quantization",
            "title": "Quantization"
        }
    ],
    "id": "841ee5fe-c944-41fd-a0ea-682c15c8f360",
    "isopen": false,
    "landing_page": "",
    "license_title": null,
    "link_orkg": "",
    "metadata_created": "2024-12-16T19:39:36.578779",
-   "metadata_modified": "2024-12-16T19:39:36.578784",
+   "metadata_modified": "2024-12-16T19:39:36.955781",
    "name": post-training-mixed-precision-quantization-for-large-language-models",
    "notes": "Large Language Models (LLMs) have greatly advanced the natural language processing paradigm. However, the high computational load and huge model sizes pose a grand challenge for deployment on edge devices. To this end, we propose APTQ (Attention-aware Post-Training Mixed-Precision Quantization) for LLMs, which considers not only the second-order information of each layer\u2019s weights, but also, for the first time, the nonlinear effect of attention outputs on the entire model.",
-   "num_resources": 0,
+   "num_resources": 1,
    "num_tags": 3,
    "organization": {
        "approval_status": "approved",
        "created": "2024-11-25T12:11:38.292601",
        "description": "",
        "id": "079d46db-32df-4b48-91f3-0a8bc8f69559",
        "image_url": "",
        "is_organization": true,
        "name": "no-organization",
        "state": "active",
        "title": "No Organization",
        "type": "organization"
    },
    "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559",
    "private": false,
    "relationships_as_object": [],
    "relationships_as_subject": [],
-   "resources": [],
+   "resources": [
+       {
+           "cache_last_updated": null,
+           "cache_url": null,
+           "created": "2024-12-16T18:25:42",
+           "data": [
+               "dcterms:title",
+               "dcterms:accessRights",
+               "dcterms:creator",
+               "dcterms:description",
+               "dcterms:issued",
+               "dcterms:language",
+               "dcterms:identifier",
+               "dcat:theme",
+               "dcterms:type",
+               "dcat:keyword",
+               "dcat:landingPage",
+               "dcterms:hasVersion",
+               "dcterms:format",
+               "mls:task",
+               "datacite:isDescribedBy"
+           ],
+           "description": "The json representation of the dataset with its distributions based on DCAT.",
+           "format": "JSON",
+           "hash": "",
+           "id": "8f35cb7f-8264-4115-bff3-c2fbea6daae2",
+           "last_modified": "2024-12-16T19:39:36.948508",
+           "metadata_modified": "2024-12-16T19:39:36.958684",
+           "mimetype": "application/json",
+           "mimetype_inner": null,
+           "name": "Original Metadata",
+           "package_id": "841ee5fe-c944-41fd-a0ea-682c15c8f360",
+           "position": 0,
+           "resource_type": null,
+           "size": 1232,
+           "state": "active",
+           "url": resource/8f35cb7f-8264-4115-bff3-c2fbea6daae2/download/metadata.json",
+           "url_type": "upload"
+       }
+   ],
    "services_used_list": "",
    "state": "active",
    "tags": [
        {
            "display_name": "Attention-aware",
            "id": "04520bc1-d128-4000-95f4-6d8dfb2899b7",
            "name": "Attention-aware",
            "state": "active",
            "vocabulary_id": null
        },
        {
            "display_name": "Large Language Models",
            "id": "6e366b4a-402d-4c0e-b818-6fec429fc72f",
            "name": "Large Language Models",
            "state": "active",
            "vocabulary_id": null
        },
        {
            "display_name": "Post-Training Mixed-Precision Quantization",
            "id": "4e0c5e35-9939-424d-90e3-2f6a3b3a5a0a",
            "name": "Post-Training Mixed-Precision Quantization",
            "state": "active",
            "vocabulary_id": null
        }
    ],
    "title": "APTQ: Attention-aware Post-Training Mixed-Precision Quantization for Large Language Models",
    "type": "dataset",
    "version": ""
}
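For reference, the change list at the top of this page amounts to a field-level comparison of two snapshots of this record. A minimal sketch of that comparison (the changed_fields helper is hypothetical, written here only for illustration; the two snapshot dicts hold just the three values this change touched, per the diff above):

# Hypothetical helper for illustration; not part of CKAN or this service.
def changed_fields(old: dict, new: dict) -> dict:
    """Map each top-level field that differs to its (old, new) value pair."""
    return {
        key: (old.get(key), new.get(key))
        for key in sorted(set(old) | set(new))
        if old.get(key) != new.get(key)
    }

before = {"doi_status": False, "doi_date_published": None, "num_resources": 0}
after = {"doi_status": True, "doi_date_published": "2024-12-16", "num_resources": 1}

for field, (old_val, new_val) in changed_fields(before, after).items():
    print(f"Changed value of field {field} to {new_val}")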