Changes
On August 4, 2023 at 8:46:54 AM UTC, admin:
-
Set author of VisE-D: Visual Event Classification Dataset to Eric Müller-Budack (previously Eric Müller-Budack, Matthias Springstein, Sherzod Hakimov, Kevin Mrutzek, Ralph Ewerth)
f | 1 | { | f | 1 | { |
n | 2 | "author": "Eric M\u00fcller-Budack, Matthias Springstein, Sherzod | n | 2 | "author": "Eric M\u00fcller-Budack", |
3 | Hakimov, Kevin Mrutzek, Ralph Ewerth", | ||||
4 | "author_email": "eric.mueller@tib.eu", | 3 | "author_email": "eric.mueller@tib.eu", | ||
5 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | 4 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | ||
6 | "doi": "10.25835/0072909", | 5 | "doi": "10.25835/0072909", | ||
7 | "doi_date_published": "2020-11-11", | 6 | "doi_date_published": "2020-11-11", | ||
8 | "doi_publisher": "LUIS", | 7 | "doi_publisher": "LUIS", | ||
9 | "doi_status": "true", | 8 | "doi_status": "true", | ||
10 | "domain": "https://data.uni-hannover.de", | 9 | "domain": "https://data.uni-hannover.de", | ||
n | n | 10 | "extra_authors": [ | ||
11 | { | ||||
12 | "extra_author": " Matthias Springstein" | ||||
13 | }, | ||||
14 | { | ||||
15 | "extra_author": " Sherzod Hakimov" | ||||
16 | }, | ||||
17 | { | ||||
18 | "extra_author": " Kevin Mrutzek" | ||||
19 | }, | ||||
20 | { | ||||
21 | "extra_author": " Ralph Ewerth" | ||||
22 | } | ||||
23 | ], | ||||
11 | "groups": [], | 24 | "groups": [], | ||
12 | "have_copyright": "Yes", | 25 | "have_copyright": "Yes", | ||
13 | "id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | 26 | "id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | ||
14 | "isopen": false, | 27 | "isopen": false, | ||
15 | "license_id": "CC-BY-NC-3.0", | 28 | "license_id": "CC-BY-NC-3.0", | ||
16 | "license_title": "CC-BY-NC-3.0", | 29 | "license_title": "CC-BY-NC-3.0", | ||
17 | "maintainer": "Eric M\u00fcller-Budack", | 30 | "maintainer": "Eric M\u00fcller-Budack", | ||
18 | "maintainer_email": "eric.mueller@tib.eu", | 31 | "maintainer_email": "eric.mueller@tib.eu", | ||
19 | "metadata_created": "2021-10-14T10:16:09.918635", | 32 | "metadata_created": "2021-10-14T10:16:09.918635", | ||
n | 20 | "metadata_modified": "2023-01-12T13:14:29.972774", | n | 33 | "metadata_modified": "2023-08-04T08:46:53.962720", |
21 | "name": "luh-vise", | 34 | "name": "luh-vise", | ||
22 | "notes": "# VisE-D: Visual Event Classification Dataset\r\n\r\nThis | 35 | "notes": "# VisE-D: Visual Event Classification Dataset\r\n\r\nThis | ||
23 | repository contains the *Visual Event Classification Dataset (VisE-D)* | 36 | repository contains the *Visual Event Classification Dataset (VisE-D)* | ||
24 | introduced in the paper:\r\n\r\n> Eric M\u00fcller-Budack, Matthias | 37 | introduced in the paper:\r\n\r\n> Eric M\u00fcller-Budack, Matthias | ||
25 | Springstein, Sherzod Hakimov, Kevin Mrutzek, and Ralph | 38 | Springstein, Sherzod Hakimov, Kevin Mrutzek, and Ralph | ||
26 | Ewerth:\r\n\"Ontology-driven Event Type Classification in | 39 | Ewerth:\r\n\"Ontology-driven Event Type Classification in | ||
27 | Images\".\r\nIn: *IEEE Winter Conference on Applications of Computer | 40 | Images\".\r\nIn: *IEEE Winter Conference on Applications of Computer | ||
28 | Vision (WACV)*, IEEE, 2021.\r\n\r\n\r\n## Content \r\n\r\n- | 41 | Vision (WACV)*, IEEE, 2021.\r\n\r\n\r\n## Content \r\n\r\n- | ||
29 | VisE-D.tar.gz:\r\n - List of image URLs with corresponding meta | 42 | VisE-D.tar.gz:\r\n - List of image URLs with corresponding meta | ||
30 | information for the:\r\n - *VisE-D* train dataset\r\n - | 43 | information for the:\r\n - *VisE-D* train dataset\r\n - | ||
31 | *VisE-D* validation dataset\r\n - *VisE-Bing* test dataset\r\n | 44 | *VisE-D* validation dataset\r\n - *VisE-Bing* test dataset\r\n | ||
32 | - *VisE-Wiki* test dataset\r\n - Different versions of the *Visual | 45 | - *VisE-Wiki* test dataset\r\n - Different versions of the *Visual | ||
33 | Event Ontology (VisE-O)*:\r\n - Initial Ontology\r\n - | 46 | Event Ontology (VisE-O)*:\r\n - Initial Ontology\r\n - | ||
34 | Disambiguated Ontology\r\n - Disambiguated Ontology for *Event | 47 | Disambiguated Ontology\r\n - Disambiguated Ontology for *Event | ||
35 | Nodes* related to a minimum of ten *Events*\r\n - Refined | 48 | Nodes* related to a minimum of ten *Events*\r\n - Refined | ||
36 | Ontology\r\n- VisE-C.tar.gz: Classification baseline (denoted as C) | 49 | Ontology\r\n- VisE-C.tar.gz: Classification baseline (denoted as C) | ||
37 | based on the ResNet-50 architecture trained on *VisE-D*.\r\n- | 50 | based on the ResNet-50 architecture trained on *VisE-D*.\r\n- | ||
38 | VisE-CO_cel.tar.gz: Ontology-driven approach with redundancy removal | 51 | VisE-CO_cel.tar.gz: Ontology-driven approach with redundancy removal | ||
39 | (denoted as CO^{cel}_{6\\omega}-RR) based on the ResNet-50 | 52 | (denoted as CO^{cel}_{6\\omega}-RR) based on the ResNet-50 | ||
40 | architecture trained on *VisE-D*.\r\n- VisE-CO_cos.tar.gz: | 53 | architecture trained on *VisE-D*.\r\n- VisE-CO_cos.tar.gz: | ||
41 | Ontology-driven approach (denoted as CO^{cos}_{\\delta}) based on the | 54 | Ontology-driven approach (denoted as CO^{cos}_{\\delta}) based on the | ||
42 | ResNet-50 architecture trained on *VisE-D*.\r\n- RED.tar.gz: | 55 | ResNet-50 architecture trained on *VisE-D*.\r\n- RED.tar.gz: | ||
43 | Ontologies and splits for the *RED* benchmark datasets (Ahsan et al. | 56 | Ontologies and splits for the *RED* benchmark datasets (Ahsan et al. | ||
44 | 2017)\r\n- SocEID.tar.gz: Ontologies and splits for the *SocEID* | 57 | 2017)\r\n- SocEID.tar.gz: Ontologies and splits for the *SocEID* | ||
45 | benchmark datasets (Ahsan et al. 2017)\r\n- WIDER.tar.gz: Ontologies | 58 | benchmark datasets (Ahsan et al. 2017)\r\n- WIDER.tar.gz: Ontologies | ||
46 | and splits for the *WIDER* benchmark datasets (Xiong et al. 2015) | 59 | and splits for the *WIDER* benchmark datasets (Xiong et al. 2015) | ||
47 | \r\n\r\n\r\n## Source Code\r\n\r\nThe source code to reproduce our | 60 | \r\n\r\n\r\n## Source Code\r\n\r\nThe source code to reproduce our | ||
48 | results can be found on our GitHub page: | 61 | results can be found on our GitHub page: | ||
49 | https://github.com/TIBHannover/VisE", | 62 | https://github.com/TIBHannover/VisE", | ||
50 | "num_resources": 7, | 63 | "num_resources": 7, | ||
51 | "num_tags": 5, | 64 | "num_tags": 5, | ||
52 | "organization": { | 65 | "organization": { | ||
53 | "approval_status": "approved", | 66 | "approval_status": "approved", | ||
54 | "created": "2017-11-23T17:30:37.757128", | 67 | "created": "2017-11-23T17:30:37.757128", | ||
55 | "description": "The German National Library of Science and | 68 | "description": "The German National Library of Science and | ||
56 | Technology, abbreviated TIB, is the national library of the Federal | 69 | Technology, abbreviated TIB, is the national library of the Federal | ||
57 | Republic of Germany for all fields of engineering, technology, and the | 70 | Republic of Germany for all fields of engineering, technology, and the | ||
58 | natural sciences.", | 71 | natural sciences.", | ||
59 | "id": "0c5362f5-b99e-41db-8256-3d0d7549bf4d", | 72 | "id": "0c5362f5-b99e-41db-8256-3d0d7549bf4d", | ||
60 | "image_url": | 73 | "image_url": | ||
61 | 3conf/ext/tib_tmpl_bootstrap/Resources/Public/images/TIB_Logo_en.png", | 74 | 3conf/ext/tib_tmpl_bootstrap/Resources/Public/images/TIB_Logo_en.png", | ||
62 | "is_organization": true, | 75 | "is_organization": true, | ||
63 | "name": "tib", | 76 | "name": "tib", | ||
64 | "state": "active", | 77 | "state": "active", | ||
65 | "title": "TIB", | 78 | "title": "TIB", | ||
66 | "type": "organization" | 79 | "type": "organization" | ||
67 | }, | 80 | }, | ||
68 | "owner_org": "0c5362f5-b99e-41db-8256-3d0d7549bf4d", | 81 | "owner_org": "0c5362f5-b99e-41db-8256-3d0d7549bf4d", | ||
69 | "private": false, | 82 | "private": false, | ||
70 | "relationships_as_object": [], | 83 | "relationships_as_object": [], | ||
71 | "relationships_as_subject": [], | 84 | "relationships_as_subject": [], | ||
72 | "repository_name": "Leibniz University Hannover", | 85 | "repository_name": "Leibniz University Hannover", | ||
73 | "resources": [ | 86 | "resources": [ | ||
74 | { | 87 | { | ||
75 | "cache_last_updated": null, | 88 | "cache_last_updated": null, | ||
76 | "cache_url": null, | 89 | "cache_url": null, | ||
77 | "created": "2020-11-11T17:16:28.023107", | 90 | "created": "2020-11-11T17:16:28.023107", | ||
78 | "description": "*Visual Event Ontology (VisE-O)*, meta | 91 | "description": "*Visual Event Ontology (VisE-O)*, meta | ||
79 | information and image links for the *Visual Event Classification | 92 | information and image links for the *Visual Event Classification | ||
80 | Dataset (VisE-D)* including the splits for training and validation as | 93 | Dataset (VisE-D)* including the splits for training and validation as | ||
81 | well as both test datasets VisE-Bing and VisE-Wiki.", | 94 | well as both test datasets VisE-Bing and VisE-Wiki.", | ||
82 | "format": "TAR", | 95 | "format": "TAR", | ||
83 | "hash": "", | 96 | "hash": "", | ||
84 | "id": "99ce7e4d-df5b-40f6-afb4-16085dbf697d", | 97 | "id": "99ce7e4d-df5b-40f6-afb4-16085dbf697d", | ||
85 | "last_modified": "2020-11-11T17:16:27.986008", | 98 | "last_modified": "2020-11-11T17:16:27.986008", | ||
n | 86 | "metadata_modified": "2023-01-12T13:14:29.976846", | n | 99 | "metadata_modified": "2023-08-04T08:46:53.966223", |
87 | "mimetype": "application/x-tar", | 100 | "mimetype": "application/x-tar", | ||
88 | "mimetype_inner": null, | 101 | "mimetype_inner": null, | ||
89 | "name": "VisE-D.tar.gz", | 102 | "name": "VisE-D.tar.gz", | ||
90 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | 103 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | ||
91 | "position": 0, | 104 | "position": 0, | ||
92 | "resource_type": null, | 105 | "resource_type": null, | ||
93 | "size": 70031264, | 106 | "size": 70031264, | ||
94 | "state": "active", | 107 | "state": "active", | ||
95 | "url": | 108 | "url": | ||
96 | resource/99ce7e4d-df5b-40f6-afb4-16085dbf697d/download/vise-d.tar.gz", | 109 | resource/99ce7e4d-df5b-40f6-afb4-16085dbf697d/download/vise-d.tar.gz", | ||
97 | "url_type": "" | 110 | "url_type": "" | ||
98 | }, | 111 | }, | ||
99 | { | 112 | { | ||
100 | "cache_last_updated": null, | 113 | "cache_last_updated": null, | ||
101 | "cache_url": null, | 114 | "cache_url": null, | ||
102 | "created": "2020-11-11T17:20:24.136308", | 115 | "created": "2020-11-11T17:20:24.136308", | ||
103 | "description": "Ontology, corresponding meta information, and | 116 | "description": "Ontology, corresponding meta information, and | ||
104 | splits for the *RED* dataset introduced by:\r\n\r\nUnaiza Ahsan, Chen | 117 | splits for the *RED* dataset introduced by:\r\n\r\nUnaiza Ahsan, Chen | ||
105 | Sun, James Hays, Irfan A. Essa: \"Complex event recognition from | 118 | Sun, James Hays, Irfan A. Essa: \"Complex event recognition from | ||
106 | images with few training examples.\" 2017 IEEE Winter Conference on | 119 | images with few training examples.\" 2017 IEEE Winter Conference on | ||
107 | Applications of Computer Vision (WACV). IEEE, 2017.", | 120 | Applications of Computer Vision (WACV). IEEE, 2017.", | ||
108 | "format": "TAR", | 121 | "format": "TAR", | ||
109 | "hash": "", | 122 | "hash": "", | ||
110 | "id": "d0f5cd8b-7c3e-4055-9810-f9cba2b69a33", | 123 | "id": "d0f5cd8b-7c3e-4055-9810-f9cba2b69a33", | ||
111 | "last_modified": "2020-11-11T17:20:24.098089", | 124 | "last_modified": "2020-11-11T17:20:24.098089", | ||
n | 112 | "metadata_modified": "2023-01-12T13:14:29.977016", | n | 125 | "metadata_modified": "2023-08-04T08:46:53.966360", |
113 | "mimetype": "application/x-tar", | 126 | "mimetype": "application/x-tar", | ||
114 | "mimetype_inner": null, | 127 | "mimetype_inner": null, | ||
115 | "name": "RED.tar.gz", | 128 | "name": "RED.tar.gz", | ||
116 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | 129 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | ||
117 | "position": 1, | 130 | "position": 1, | ||
118 | "resource_type": null, | 131 | "resource_type": null, | ||
119 | "size": 241357, | 132 | "size": 241357, | ||
120 | "state": "active", | 133 | "state": "active", | ||
121 | "url": | 134 | "url": | ||
122 | fa/resource/d0f5cd8b-7c3e-4055-9810-f9cba2b69a33/download/red.tar.gz", | 135 | fa/resource/d0f5cd8b-7c3e-4055-9810-f9cba2b69a33/download/red.tar.gz", | ||
123 | "url_type": "" | 136 | "url_type": "" | ||
124 | }, | 137 | }, | ||
125 | { | 138 | { | ||
126 | "cache_last_updated": null, | 139 | "cache_last_updated": null, | ||
127 | "cache_url": null, | 140 | "cache_url": null, | ||
128 | "created": "2020-11-11T17:21:13.466242", | 141 | "created": "2020-11-11T17:21:13.466242", | ||
129 | "description": "Ontology, corresponding meta information, and | 142 | "description": "Ontology, corresponding meta information, and | ||
130 | splits for the *SocEID* dataset introduced by:\r\n\r\nUnaiza Ahsan, | 143 | splits for the *SocEID* dataset introduced by:\r\n\r\nUnaiza Ahsan, | ||
131 | Chen Sun, James Hays, Irfan A. Essa: \"Complex event recognition from | 144 | Chen Sun, James Hays, Irfan A. Essa: \"Complex event recognition from | ||
132 | images with few training examples.\" 2017 IEEE Winter Conference on | 145 | images with few training examples.\" 2017 IEEE Winter Conference on | ||
133 | Applications of Computer Vision (WACV). IEEE, 2017.", | 146 | Applications of Computer Vision (WACV). IEEE, 2017.", | ||
134 | "format": "TAR", | 147 | "format": "TAR", | ||
135 | "hash": "", | 148 | "hash": "", | ||
136 | "id": "a8373c98-32a8-408c-b8e9-51e6b1e01777", | 149 | "id": "a8373c98-32a8-408c-b8e9-51e6b1e01777", | ||
137 | "last_modified": "2020-11-11T17:21:13.424875", | 150 | "last_modified": "2020-11-11T17:21:13.424875", | ||
n | 138 | "metadata_modified": "2023-01-12T13:14:29.977129", | n | 151 | "metadata_modified": "2023-08-04T08:46:53.966470", |
139 | "mimetype": "application/x-tar", | 152 | "mimetype": "application/x-tar", | ||
140 | "mimetype_inner": null, | 153 | "mimetype_inner": null, | ||
141 | "name": "SocEID.tar.gz", | 154 | "name": "SocEID.tar.gz", | ||
142 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | 155 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | ||
143 | "position": 2, | 156 | "position": 2, | ||
144 | "resource_type": null, | 157 | "resource_type": null, | ||
145 | "size": 1544221, | 158 | "size": 1544221, | ||
146 | "state": "active", | 159 | "state": "active", | ||
147 | "url": | 160 | "url": | ||
148 | resource/a8373c98-32a8-408c-b8e9-51e6b1e01777/download/soceid.tar.gz", | 161 | resource/a8373c98-32a8-408c-b8e9-51e6b1e01777/download/soceid.tar.gz", | ||
149 | "url_type": "" | 162 | "url_type": "" | ||
150 | }, | 163 | }, | ||
151 | { | 164 | { | ||
152 | "cache_last_updated": null, | 165 | "cache_last_updated": null, | ||
153 | "cache_url": null, | 166 | "cache_url": null, | ||
154 | "created": "2020-11-11T17:23:51.340459", | 167 | "created": "2020-11-11T17:23:51.340459", | ||
155 | "description": "Ontology, corresponding meta information, and | 168 | "description": "Ontology, corresponding meta information, and | ||
156 | splits for the *WIDER* dataset introduced by:\r\n\r\nYuanjun Xiong, | 169 | splits for the *WIDER* dataset introduced by:\r\n\r\nYuanjun Xiong, | ||
157 | Kai Zhu, Dahua Lin, Xiaoou Tang: \"Recognize complex events from | 170 | Kai Zhu, Dahua Lin, Xiaoou Tang: \"Recognize complex events from | ||
158 | static images by fusing deep channels.\" Proceedings of the IEEE | 171 | static images by fusing deep channels.\" Proceedings of the IEEE | ||
159 | Conference on Computer Vision and Pattern Recognition. 2015.", | 172 | Conference on Computer Vision and Pattern Recognition. 2015.", | ||
160 | "format": "TAR", | 173 | "format": "TAR", | ||
161 | "hash": "", | 174 | "hash": "", | ||
162 | "id": "b1c2f92b-4b69-46fc-9282-16acc7a1c9aa", | 175 | "id": "b1c2f92b-4b69-46fc-9282-16acc7a1c9aa", | ||
163 | "last_modified": "2020-11-11T17:23:51.291259", | 176 | "last_modified": "2020-11-11T17:23:51.291259", | ||
n | 164 | "metadata_modified": "2023-01-12T13:14:29.977235", | n | 177 | "metadata_modified": "2023-08-04T08:46:53.966575", |
165 | "mimetype": "application/x-tar", | 178 | "mimetype": "application/x-tar", | ||
166 | "mimetype_inner": null, | 179 | "mimetype_inner": null, | ||
167 | "name": "WIDER.tar.gz", | 180 | "name": "WIDER.tar.gz", | ||
168 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | 181 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | ||
169 | "position": 3, | 182 | "position": 3, | ||
170 | "resource_type": null, | 183 | "resource_type": null, | ||
171 | "size": 1625015, | 184 | "size": 1625015, | ||
172 | "state": "active", | 185 | "state": "active", | ||
173 | "url": | 186 | "url": | ||
174 | /resource/b1c2f92b-4b69-46fc-9282-16acc7a1c9aa/download/wider.tar.gz", | 187 | /resource/b1c2f92b-4b69-46fc-9282-16acc7a1c9aa/download/wider.tar.gz", | ||
175 | "url_type": "" | 188 | "url_type": "" | ||
176 | }, | 189 | }, | ||
177 | { | 190 | { | ||
178 | "cache_last_updated": null, | 191 | "cache_last_updated": null, | ||
179 | "cache_url": null, | 192 | "cache_url": null, | ||
180 | "created": "2020-11-11T17:26:23.812318", | 193 | "created": "2020-11-11T17:26:23.812318", | ||
181 | "description": "Classification baseline (denoted as C) based on | 194 | "description": "Classification baseline (denoted as C) based on | ||
182 | the ResNet-50 architecture trained on VisE-D.", | 195 | the ResNet-50 architecture trained on VisE-D.", | ||
183 | "format": "TAR", | 196 | "format": "TAR", | ||
184 | "hash": "", | 197 | "hash": "", | ||
185 | "id": "727c3ee1-4107-4996-878d-1caf537730e8", | 198 | "id": "727c3ee1-4107-4996-878d-1caf537730e8", | ||
186 | "last_modified": "2020-11-13T10:05:46.185080", | 199 | "last_modified": "2020-11-13T10:05:46.185080", | ||
n | 187 | "metadata_modified": "2023-01-12T13:14:29.977338", | n | 200 | "metadata_modified": "2023-08-04T08:46:53.966678", |
188 | "mimetype": "application/x-tar", | 201 | "mimetype": "application/x-tar", | ||
189 | "mimetype_inner": null, | 202 | "mimetype_inner": null, | ||
190 | "name": "VisE_C.tar.gz", | 203 | "name": "VisE_C.tar.gz", | ||
191 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | 204 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | ||
192 | "position": 4, | 205 | "position": 4, | ||
193 | "resource_type": null, | 206 | "resource_type": null, | ||
194 | "size": 177167171, | 207 | "size": 177167171, | ||
195 | "state": "active", | 208 | "state": "active", | ||
196 | "url": | 209 | "url": | ||
197 | resource/727c3ee1-4107-4996-878d-1caf537730e8/download/vise_c.tar.gz", | 210 | resource/727c3ee1-4107-4996-878d-1caf537730e8/download/vise_c.tar.gz", | ||
198 | "url_type": "" | 211 | "url_type": "" | ||
199 | }, | 212 | }, | ||
200 | { | 213 | { | ||
201 | "cache_last_updated": null, | 214 | "cache_last_updated": null, | ||
202 | "cache_url": null, | 215 | "cache_url": null, | ||
203 | "created": "2020-11-11T17:30:49.600252", | 216 | "created": "2020-11-11T17:30:49.600252", | ||
204 | "description": "Ontology-driven approach (denoted as | 217 | "description": "Ontology-driven approach (denoted as | ||
205 | CO^{cos}_{\\delta}) based on the ResNet-50 architecture trained on | 218 | CO^{cos}_{\\delta}) based on the ResNet-50 architecture trained on | ||
206 | VisE-D.", | 219 | VisE-D.", | ||
207 | "format": "TAR", | 220 | "format": "TAR", | ||
208 | "hash": "", | 221 | "hash": "", | ||
209 | "id": "b105c1aa-3bc4-4233-8103-8f4616948d85", | 222 | "id": "b105c1aa-3bc4-4233-8103-8f4616948d85", | ||
210 | "last_modified": "2020-11-13T10:07:40.264971", | 223 | "last_modified": "2020-11-13T10:07:40.264971", | ||
n | 211 | "metadata_modified": "2023-01-12T13:14:29.977439", | n | 224 | "metadata_modified": "2023-08-04T08:46:53.966781", |
212 | "mimetype": "application/x-tar", | 225 | "mimetype": "application/x-tar", | ||
213 | "mimetype_inner": null, | 226 | "mimetype_inner": null, | ||
214 | "name": "VisE_CO_cos.tar.gz", | 227 | "name": "VisE_CO_cos.tar.gz", | ||
215 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | 228 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | ||
216 | "position": 5, | 229 | "position": 5, | ||
217 | "resource_type": null, | 230 | "resource_type": null, | ||
218 | "size": 180947205, | 231 | "size": 180947205, | ||
219 | "state": "active", | 232 | "state": "active", | ||
220 | "url": | 233 | "url": | ||
221 | rce/b105c1aa-3bc4-4233-8103-8f4616948d85/download/vise_co_cos.tar.gz", | 234 | rce/b105c1aa-3bc4-4233-8103-8f4616948d85/download/vise_co_cos.tar.gz", | ||
222 | "url_type": "" | 235 | "url_type": "" | ||
223 | }, | 236 | }, | ||
224 | { | 237 | { | ||
225 | "cache_last_updated": null, | 238 | "cache_last_updated": null, | ||
226 | "cache_url": null, | 239 | "cache_url": null, | ||
227 | "created": "2020-11-11T17:32:07.201026", | 240 | "created": "2020-11-11T17:32:07.201026", | ||
228 | "description": "Ontology-driven approach with redundancy removal | 241 | "description": "Ontology-driven approach with redundancy removal | ||
229 | (denoted as CO^{cel}_{6\\omega}-RR) based on the ResNet-50 | 242 | (denoted as CO^{cel}_{6\\omega}-RR) based on the ResNet-50 | ||
230 | architecture trained on VisE-D.", | 243 | architecture trained on VisE-D.", | ||
231 | "format": "TAR", | 244 | "format": "TAR", | ||
232 | "hash": "", | 245 | "hash": "", | ||
233 | "id": "7c672f2b-f45e-40aa-b6bb-01fb2e9bf5e7", | 246 | "id": "7c672f2b-f45e-40aa-b6bb-01fb2e9bf5e7", | ||
234 | "last_modified": "2020-11-13T10:08:37.192754", | 247 | "last_modified": "2020-11-13T10:08:37.192754", | ||
n | 235 | "metadata_modified": "2023-01-12T13:14:29.977539", | n | 248 | "metadata_modified": "2023-08-04T08:46:53.966880", |
236 | "mimetype": "application/x-tar", | 249 | "mimetype": "application/x-tar", | ||
237 | "mimetype_inner": null, | 250 | "mimetype_inner": null, | ||
238 | "name": "VisE_CO_cel.tar.gz", | 251 | "name": "VisE_CO_cel.tar.gz", | ||
239 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | 252 | "package_id": "3afb333d-230f-4829-91bb-d4dd41bfdcfa", | ||
240 | "position": 6, | 253 | "position": 6, | ||
241 | "resource_type": null, | 254 | "resource_type": null, | ||
242 | "size": 178380049, | 255 | "size": 178380049, | ||
243 | "state": "active", | 256 | "state": "active", | ||
244 | "url": | 257 | "url": | ||
245 | rce/7c672f2b-f45e-40aa-b6bb-01fb2e9bf5e7/download/vise_co_cel.tar.gz", | 258 | rce/7c672f2b-f45e-40aa-b6bb-01fb2e9bf5e7/download/vise_co_cel.tar.gz", | ||
246 | "url_type": "" | 259 | "url_type": "" | ||
247 | } | 260 | } | ||
248 | ], | 261 | ], | ||
t | t | 262 | "services_used_list": "", | ||
249 | "source_metadata_created": "2020-11-11T15:57:10.070362", | 263 | "source_metadata_created": "2020-11-11T15:57:10.070362", | ||
250 | "source_metadata_modified": "2022-01-20T11:00:35.121565", | 264 | "source_metadata_modified": "2022-01-20T11:00:35.121565", | ||
251 | "state": "active", | 265 | "state": "active", | ||
252 | "tags": [ | 266 | "tags": [ | ||
253 | { | 267 | { | ||
254 | "display_name": "computer vision", | 268 | "display_name": "computer vision", | ||
255 | "id": "f650b4e3-9955-49b0-ba7b-2d302a990978", | 269 | "id": "f650b4e3-9955-49b0-ba7b-2d302a990978", | ||
256 | "name": "computer vision", | 270 | "name": "computer vision", | ||
257 | "state": "active", | 271 | "state": "active", | ||
258 | "vocabulary_id": null | 272 | "vocabulary_id": null | ||
259 | }, | 273 | }, | ||
260 | { | 274 | { | ||
261 | "display_name": "deep learning", | 275 | "display_name": "deep learning", | ||
262 | "id": "19e41883-3799-4184-9e0e-26c95795b119", | 276 | "id": "19e41883-3799-4184-9e0e-26c95795b119", | ||
263 | "name": "deep learning", | 277 | "name": "deep learning", | ||
264 | "state": "active", | 278 | "state": "active", | ||
265 | "vocabulary_id": null | 279 | "vocabulary_id": null | ||
266 | }, | 280 | }, | ||
267 | { | 281 | { | ||
268 | "display_name": "event classification", | 282 | "display_name": "event classification", | ||
269 | "id": "7b5441ec-e6db-4f04-b813-8f4880639db5", | 283 | "id": "7b5441ec-e6db-4f04-b813-8f4880639db5", | ||
270 | "name": "event classification", | 284 | "name": "event classification", | ||
271 | "state": "active", | 285 | "state": "active", | ||
272 | "vocabulary_id": null | 286 | "vocabulary_id": null | ||
273 | }, | 287 | }, | ||
274 | { | 288 | { | ||
275 | "display_name": "image classification", | 289 | "display_name": "image classification", | ||
276 | "id": "34936550-ce1a-41b5-8c58-23081a6c673d", | 290 | "id": "34936550-ce1a-41b5-8c58-23081a6c673d", | ||
277 | "name": "image classification", | 291 | "name": "image classification", | ||
278 | "state": "active", | 292 | "state": "active", | ||
279 | "vocabulary_id": null | 293 | "vocabulary_id": null | ||
280 | }, | 294 | }, | ||
281 | { | 295 | { | ||
282 | "display_name": "knowledge graphs", | 296 | "display_name": "knowledge graphs", | ||
283 | "id": "6a8020fc-d07a-4f67-9d12-74c252ba1c81", | 297 | "id": "6a8020fc-d07a-4f67-9d12-74c252ba1c81", | ||
284 | "name": "knowledge graphs", | 298 | "name": "knowledge graphs", | ||
285 | "state": "active", | 299 | "state": "active", | ||
286 | "vocabulary_id": null | 300 | "vocabulary_id": null | ||
287 | } | 301 | } | ||
288 | ], | 302 | ], | ||
289 | "terms_of_usage": "Yes", | 303 | "terms_of_usage": "Yes", | ||
290 | "title": "VisE-D: Visual Event Classification Dataset", | 304 | "title": "VisE-D: Visual Event Classification Dataset", | ||
291 | "type": "vdataset", | 305 | "type": "vdataset", | ||
292 | "url": "https://data.uni-hannover.de/dataset/vise", | 306 | "url": "https://data.uni-hannover.de/dataset/vise", | ||
293 | "version": "" | 307 | "version": "" | ||
294 | } | 308 | } |