Changes
On December 2, 2024 at 10:44:34 PM UTC, admin:
-
Changed value of field
doi_status
to True
in ANTNets: Mobile Convolutional Neural Networks for Resource Efficient Image Classification -
Changed value of field
doi_date_published
to 2024-12-02
in ANTNets: Mobile Convolutional Neural Networks for Resource Efficient Image Classification -
Added resource Original Metadata to ANTNets: Mobile Convolutional Neural Networks for Resource Efficient Image Classification
f | 1 | { | f | 1 | { |
2 | "access_rights": "", | 2 | "access_rights": "", | ||
3 | "author": "Yunyang Xiong", | 3 | "author": "Yunyang Xiong", | ||
4 | "author_email": "", | 4 | "author_email": "", | ||
5 | "citation": [], | 5 | "citation": [], | ||
6 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | 6 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | ||
7 | "defined_in": "", | 7 | "defined_in": "", | ||
8 | "doi": "10.57702/gepvlxqx", | 8 | "doi": "10.57702/gepvlxqx", | ||
n | 9 | "doi_date_published": null, | n | 9 | "doi_date_published": "2024-12-02", |
10 | "doi_publisher": "TIB", | 10 | "doi_publisher": "TIB", | ||
n | 11 | "doi_status": false, | n | 11 | "doi_status": true, |
12 | "domain": "https://service.tib.eu/ldmservice", | 12 | "domain": "https://service.tib.eu/ldmservice", | ||
13 | "extra_authors": [ | 13 | "extra_authors": [ | ||
14 | { | 14 | { | ||
15 | "extra_author": "Hyunwoo J. Kim", | 15 | "extra_author": "Hyunwoo J. Kim", | ||
16 | "orcid": "" | 16 | "orcid": "" | ||
17 | }, | 17 | }, | ||
18 | { | 18 | { | ||
19 | "extra_author": "Varsha Hedau", | 19 | "extra_author": "Varsha Hedau", | ||
20 | "orcid": "" | 20 | "orcid": "" | ||
21 | } | 21 | } | ||
22 | ], | 22 | ], | ||
23 | "groups": [ | 23 | "groups": [ | ||
24 | { | 24 | { | ||
25 | "description": "", | 25 | "description": "", | ||
26 | "display_name": "Computer Vision", | 26 | "display_name": "Computer Vision", | ||
27 | "id": "d09caf7c-26c7-4e4d-bb8e-49476a90ba25", | 27 | "id": "d09caf7c-26c7-4e4d-bb8e-49476a90ba25", | ||
28 | "image_display_url": "", | 28 | "image_display_url": "", | ||
29 | "name": "computer-vision", | 29 | "name": "computer-vision", | ||
30 | "title": "Computer Vision" | 30 | "title": "Computer Vision" | ||
31 | }, | 31 | }, | ||
32 | { | 32 | { | ||
33 | "description": "", | 33 | "description": "", | ||
34 | "display_name": "Deep Learning", | 34 | "display_name": "Deep Learning", | ||
35 | "id": "d2734132-7098-4cc5-9f4c-5f9b6e1d7922", | 35 | "id": "d2734132-7098-4cc5-9f4c-5f9b6e1d7922", | ||
36 | "image_display_url": "", | 36 | "image_display_url": "", | ||
37 | "name": "deep-learning", | 37 | "name": "deep-learning", | ||
38 | "title": "Deep Learning" | 38 | "title": "Deep Learning" | ||
39 | } | 39 | } | ||
40 | ], | 40 | ], | ||
41 | "id": "ba26dde2-3d45-476b-a361-e283e23ea3d6", | 41 | "id": "ba26dde2-3d45-476b-a361-e283e23ea3d6", | ||
42 | "isopen": false, | 42 | "isopen": false, | ||
43 | "landing_page": "https://arxiv.org/abs/1807.11164", | 43 | "landing_page": "https://arxiv.org/abs/1807.11164", | ||
44 | "license_title": null, | 44 | "license_title": null, | ||
45 | "link_orkg": "", | 45 | "link_orkg": "", | ||
46 | "metadata_created": "2024-12-02T22:44:33.091133", | 46 | "metadata_created": "2024-12-02T22:44:33.091133", | ||
n | 47 | "metadata_modified": "2024-12-02T22:44:33.091139", | n | 47 | "metadata_modified": "2024-12-02T22:44:33.791456", |
48 | "name": | 48 | "name": | ||
49 | volutional-neural-networks-for-resource-ef-cient-image-classi-cation", | 49 | volutional-neural-networks-for-resource-ef-cient-image-classi-cation", | ||
50 | "notes": "Deep convolutional neural networks have achieved | 50 | "notes": "Deep convolutional neural networks have achieved | ||
51 | remarkable success in computer vision. However, deep neural networks | 51 | remarkable success in computer vision. However, deep neural networks | ||
52 | require large computing resources to achieve high performance. | 52 | require large computing resources to achieve high performance. | ||
53 | Although depthwise separable convolution can be an efficient module to | 53 | Although depthwise separable convolution can be an efficient module to | ||
54 | approximate a standard convolution, it often leads to reduced | 54 | approximate a standard convolution, it often leads to reduced | ||
55 | representational power of networks. In this paper, under budget | 55 | representational power of networks. In this paper, under budget | ||
56 | constraints such as computational cost (MAdds) and the parameter | 56 | constraints such as computational cost (MAdds) and the parameter | ||
57 | count, we propose a novel basic architectural block, ANTBlock. It | 57 | count, we propose a novel basic architectural block, ANTBlock. It | ||
58 | boosts the representational power by modeling, in a high dimensional | 58 | boosts the representational power by modeling, in a high dimensional | ||
59 | space, interdependency of channels between a depthwise convolution | 59 | space, interdependency of channels between a depthwise convolution | ||
60 | layer and a projection layer in the ANTBlocks. Our experiments show | 60 | layer and a projection layer in the ANTBlocks. Our experiments show | ||
61 | that ANTNet built by a sequence of ANTBlocks, consistently outperforms | 61 | that ANTNet built by a sequence of ANTBlocks, consistently outperforms | ||
62 | state-of-the-art low-cost mobile convolutional neural networks across | 62 | state-of-the-art low-cost mobile convolutional neural networks across | ||
63 | multiple datasets. On CIFAR100, our model achieves 75.7% top-1 | 63 | multiple datasets. On CIFAR100, our model achieves 75.7% top-1 | ||
64 | accuracy, which is 1.5% higher than MobileNetV2 with 8.3% fewer | 64 | accuracy, which is 1.5% higher than MobileNetV2 with 8.3% fewer | ||
65 | parameters and 19.6% less computational cost. On ImageNet, our model | 65 | parameters and 19.6% less computational cost. On ImageNet, our model | ||
66 | achieves 72.8% top-1 accuracy, which is 0.8% improvement, with 157.7ms | 66 | achieves 72.8% top-1 accuracy, which is 0.8% improvement, with 157.7ms | ||
67 | (20% faster) on iPhone 5s over MobileNetV2.", | 67 | (20% faster) on iPhone 5s over MobileNetV2.", | ||
n | 68 | "num_resources": 0, | n | 68 | "num_resources": 1, |
69 | "num_tags": 5, | 69 | "num_tags": 5, | ||
70 | "organization": { | 70 | "organization": { | ||
71 | "approval_status": "approved", | 71 | "approval_status": "approved", | ||
72 | "created": "2024-11-25T12:11:38.292601", | 72 | "created": "2024-11-25T12:11:38.292601", | ||
73 | "description": "", | 73 | "description": "", | ||
74 | "id": "079d46db-32df-4b48-91f3-0a8bc8f69559", | 74 | "id": "079d46db-32df-4b48-91f3-0a8bc8f69559", | ||
75 | "image_url": "", | 75 | "image_url": "", | ||
76 | "is_organization": true, | 76 | "is_organization": true, | ||
77 | "name": "no-organization", | 77 | "name": "no-organization", | ||
78 | "state": "active", | 78 | "state": "active", | ||
79 | "title": "No Organization", | 79 | "title": "No Organization", | ||
80 | "type": "organization" | 80 | "type": "organization" | ||
81 | }, | 81 | }, | ||
82 | "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559", | 82 | "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559", | ||
83 | "private": false, | 83 | "private": false, | ||
84 | "relationships_as_object": [], | 84 | "relationships_as_object": [], | ||
85 | "relationships_as_subject": [], | 85 | "relationships_as_subject": [], | ||
t | 86 | "resources": [], | t | 86 | "resources": [ |
87 | { | ||||
88 | "cache_last_updated": null, | ||||
89 | "cache_url": null, | ||||
90 | "created": "2024-12-02T22:29:38", | ||||
91 | "data": [ | ||||
92 | "dcterms:title", | ||||
93 | "dcterms:accessRights", | ||||
94 | "dcterms:creator", | ||||
95 | "dcterms:description", | ||||
96 | "dcterms:issued", | ||||
97 | "dcterms:language", | ||||
98 | "dcterms:identifier", | ||||
99 | "dcat:theme", | ||||
100 | "dcterms:type", | ||||
101 | "dcat:keyword", | ||||
102 | "dcat:landingPage", | ||||
103 | "dcterms:hasVersion", | ||||
104 | "dcterms:format", | ||||
105 | "mls:task" | ||||
106 | ], | ||||
107 | "description": "The json representation of the dataset with its | ||||
108 | distributions based on DCAT.", | ||||
109 | "format": "JSON", | ||||
110 | "hash": "", | ||||
111 | "id": "9eb5a9d7-f2e4-4586-a1d3-333af2f00349", | ||||
112 | "last_modified": "2024-12-02T22:44:33.783918", | ||||
113 | "metadata_modified": "2024-12-02T22:44:33.794245", | ||||
114 | "mimetype": "application/json", | ||||
115 | "mimetype_inner": null, | ||||
116 | "name": "Original Metadata", | ||||
117 | "package_id": "ba26dde2-3d45-476b-a361-e283e23ea3d6", | ||||
118 | "position": 0, | ||||
119 | "resource_type": null, | ||||
120 | "size": 1856, | ||||
121 | "state": "active", | ||||
122 | "url": | ||||
123 | resource/9eb5a9d7-f2e4-4586-a1d3-333af2f00349/download/metadata.json", | ||||
124 | "url_type": "upload" | ||||
125 | } | ||||
126 | ], | ||||
87 | "services_used_list": "", | 127 | "services_used_list": "", | ||
88 | "state": "active", | 128 | "state": "active", | ||
89 | "tags": [ | 129 | "tags": [ | ||
90 | { | 130 | { | ||
91 | "display_name": "ANTNet", | 131 | "display_name": "ANTNet", | ||
92 | "id": "d3730876-ca0e-4883-b264-d347c720a1d2", | 132 | "id": "d3730876-ca0e-4883-b264-d347c720a1d2", | ||
93 | "name": "ANTNet", | 133 | "name": "ANTNet", | ||
94 | "state": "active", | 134 | "state": "active", | ||
95 | "vocabulary_id": null | 135 | "vocabulary_id": null | ||
96 | }, | 136 | }, | ||
97 | { | 137 | { | ||
98 | "display_name": "CIFAR100", | 138 | "display_name": "CIFAR100", | ||
99 | "id": "68dc3acc-cef1-45da-8666-942409253a71", | 139 | "id": "68dc3acc-cef1-45da-8666-942409253a71", | ||
100 | "name": "CIFAR100", | 140 | "name": "CIFAR100", | ||
101 | "state": "active", | 141 | "state": "active", | ||
102 | "vocabulary_id": null | 142 | "vocabulary_id": null | ||
103 | }, | 143 | }, | ||
104 | { | 144 | { | ||
105 | "display_name": "Depthwise Separable Convolution", | 145 | "display_name": "Depthwise Separable Convolution", | ||
106 | "id": "b1bdb59d-7cfb-4d4f-82bd-0bd7012f64f3", | 146 | "id": "b1bdb59d-7cfb-4d4f-82bd-0bd7012f64f3", | ||
107 | "name": "Depthwise Separable Convolution", | 147 | "name": "Depthwise Separable Convolution", | ||
108 | "state": "active", | 148 | "state": "active", | ||
109 | "vocabulary_id": null | 149 | "vocabulary_id": null | ||
110 | }, | 150 | }, | ||
111 | { | 151 | { | ||
112 | "display_name": "ImageNet", | 152 | "display_name": "ImageNet", | ||
113 | "id": "114653a3-d688-42fb-8e76-350752af988b", | 153 | "id": "114653a3-d688-42fb-8e76-350752af988b", | ||
114 | "name": "ImageNet", | 154 | "name": "ImageNet", | ||
115 | "state": "active", | 155 | "state": "active", | ||
116 | "vocabulary_id": null | 156 | "vocabulary_id": null | ||
117 | }, | 157 | }, | ||
118 | { | 158 | { | ||
119 | "display_name": "MobileNetV2", | 159 | "display_name": "MobileNetV2", | ||
120 | "id": "1f906d45-2f51-4822-9daf-eb6d6194e53c", | 160 | "id": "1f906d45-2f51-4822-9daf-eb6d6194e53c", | ||
121 | "name": "MobileNetV2", | 161 | "name": "MobileNetV2", | ||
122 | "state": "active", | 162 | "state": "active", | ||
123 | "vocabulary_id": null | 163 | "vocabulary_id": null | ||
124 | } | 164 | } | ||
125 | ], | 165 | ], | ||
126 | "title": "ANTNets: Mobile Convolutional Neural Networks for Resource | 166 | "title": "ANTNets: Mobile Convolutional Neural Networks for Resource | ||
127 | Ef\ufb01cient Image Classi\ufb01cation", | 167 | Ef\ufb01cient Image Classi\ufb01cation", | ||
128 | "type": "dataset", | 168 | "type": "dataset", | ||
129 | "version": "" | 169 | "version": "" | ||
130 | } | 170 | } |