Changes
On December 2, 2024 at 6:28:57 PM UTC, admin:

- Changed value of field doi_status to True in Subject2Vec: Generative-Discriminative Approach from a Set of Image Patches to a Vector
- Changed value of field doi_date_published to 2024-12-02 in Subject2Vec: Generative-Discriminative Approach from a Set of Image Patches to a Vector
- Added resource Original Metadata to Subject2Vec: Generative-Discriminative Approach from a Set of Image Patches to a Vector
Dataset metadata diff (- old value, + new value):

  {
    "access_rights": "",
    "author": "Sumedha Singla",
    "author_email": "",
    "citation": [],
    "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700",
    "defined_in": "https://doi.org/10.48550/arXiv.1806.11217",
    "doi": "10.57702/dn2vz0m7",
-   "doi_date_published": null,
+   "doi_date_published": "2024-12-02",
    "doi_publisher": "TIB",
-   "doi_status": false,
+   "doi_status": true,
    "domain": "https://service.tib.eu/ldmservice",
    "extra_authors": [
      {
        "extra_author": "Mingming Gong",
        "orcid": ""
      },
      {
        "extra_author": "Siamak Ravanbakhsh",
        "orcid": ""
      },
      {
        "extra_author": "Frank Sciurba",
        "orcid": ""
      },
      {
        "extra_author": "Barnabas Poczos",
        "orcid": ""
      },
      {
        "extra_author": "Kayhan N. Batmanghelich",
        "orcid": ""
      }
    ],
    "groups": [
      {
        "description": "",
        "display_name": "Computer Vision",
        "id": "d09caf7c-26c7-4e4d-bb8e-49476a90ba25",
        "image_display_url": "",
        "name": "computer-vision",
        "title": "Computer Vision"
      },
      {
        "description": "",
        "display_name": "Deep Learning",
        "id": "d2734132-7098-4cc5-9f4c-5f9b6e1d7922",
        "image_display_url": "",
        "name": "deep-learning",
        "title": "Deep Learning"
      },
      {
        "description": "",
        "display_name": "Image Processing",
        "id": "31f537e7-23f2-4663-9ae4-f2147dfbce50",
        "image_display_url": "",
        "name": "image-processing",
        "title": "Image Processing"
      }
    ],
    "id": "178990e3-6a9e-4d63-b821-45de277642fa",
    "isopen": false,
    "landing_page": "https://arxiv.org/abs/1709.09437",
    "license_title": null,
    "link_orkg": "",
    "metadata_created": "2024-12-02T18:28:56.201404",
-   "metadata_modified": "2024-12-02T18:28:56.201411",
+   "metadata_modified": "2024-12-02T18:28:56.539494",
    "name": "subject2vec-generative-discriminative-approach-from-a-set-of-image-patches-to-a-vector",
    "notes": "A deep learning model that learns subject-level representation from a set of local features. The model represents the image volume as a bag (or set) of local features and can accommodate input images of variable sizes.",
-   "num_resources": 0,
+   "num_resources": 1,
    "num_tags": 5,
    "organization": {
      "approval_status": "approved",
      "created": "2024-11-25T12:11:38.292601",
      "description": "",
      "id": "079d46db-32df-4b48-91f3-0a8bc8f69559",
      "image_url": "",
      "is_organization": true,
      "name": "no-organization",
      "state": "active",
      "title": "No Organization",
      "type": "organization"
    },
    "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559",
    "private": false,
    "relationships_as_object": [],
    "relationships_as_subject": [],
-   "resources": [],
+   "resources": [
+     {
+       "cache_last_updated": null,
+       "cache_url": null,
+       "created": "2024-12-02T18:38:42",
+       "data": [
+         "dcterms:title",
+         "dcterms:accessRights",
+         "dcterms:creator",
+         "dcterms:description",
+         "dcterms:issued",
+         "dcterms:language",
+         "dcterms:identifier",
+         "dcat:theme",
+         "dcterms:type",
+         "dcat:keyword",
+         "dcat:landingPage",
+         "dcterms:hasVersion",
+         "dcterms:format",
+         "mls:task",
+         "datacite:isDescribedBy"
+       ],
+       "description": "The json representation of the dataset with its distributions based on DCAT.",
+       "format": "JSON",
+       "hash": "",
+       "id": "2c763c67-abf7-4481-9dfc-af104fd5b1af",
+       "last_modified": "2024-12-02T18:28:56.532270",
+       "metadata_modified": "2024-12-02T18:28:56.542206",
+       "mimetype": "application/json",
+       "mimetype_inner": null,
+       "name": "Original Metadata",
+       "package_id": "178990e3-6a9e-4d63-b821-45de277642fa",
+       "position": 0,
+       "resource_type": null,
+       "size": 1068,
+       "state": "active",
+       "url": "resource/2c763c67-abf7-4481-9dfc-af104fd5b1af/download/metadata.json",
+       "url_type": "upload"
+     }
+   ],
    "services_used_list": "",
    "state": "active",
    "tags": [
      {
        "display_name": "computer vision",
        "id": "f650b4e3-9955-49b0-ba7b-2d302a990978",
        "name": "computer vision",
        "state": "active",
        "vocabulary_id": null
      },
      {
        "display_name": "deep learning",
        "id": "19e41883-3799-4184-9e0e-26c95795b119",
        "name": "deep learning",
        "state": "active",
        "vocabulary_id": null
      },
      {
        "display_name": "image patches",
        "id": "951cbe50-6002-4e1b-9ba5-b411834fa41e",
        "name": "image patches",
        "state": "active",
        "vocabulary_id": null
      },
      {
        "display_name": "local features",
        "id": "05924ddd-b9b3-40e8-a5e4-6147707f6e67",
        "name": "local features",
        "state": "active",
        "vocabulary_id": null
      },
      {
        "display_name": "subject-level representation",
        "id": "3336400d-78a2-4c5f-b9f9-084725e65125",
        "name": "subject-level representation",
        "state": "active",
        "vocabulary_id": null
      }
    ],
    "title": "Subject2Vec: Generative-Discriminative Approach from a Set of Image Patches to a Vector",
    "type": "dataset",
    "version": ""
  }
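As a rough sketch of how these changes could be checked against the live record, the snippet below queries the dataset through the standard CKAN Action API (package_show). It assumes the LDM service at https://service.tib.eu/ldmservice exposes that API under /api/3/action/; the dataset id and field names are taken from the metadata above, and whether the DOI fields appear at the top level of the response depends on the portal's schema.

# Sketch only: assumes the portal exposes the standard CKAN Action API and that
# the doi_* fields are returned at the top level of the package dictionary.
import json
import urllib.request

BASE = "https://service.tib.eu/ldmservice"            # "domain" field above
DATASET_ID = "178990e3-6a9e-4d63-b821-45de277642fa"    # "id" field above

url = f"{BASE}/api/3/action/package_show?id={DATASET_ID}"
with urllib.request.urlopen(url) as resp:
    package = json.load(resp)["result"]

# The three changes recorded in this activity entry.
print("doi_status:", package.get("doi_status"))                  # expected: True (was False)
print("doi_date_published:", package.get("doi_date_published"))  # expected: 2024-12-02 (was null)
print("num_resources:", package.get("num_resources"))            # expected: 1 (was 0)

# The resource added by this change ("Original Metadata").
for res in package.get("resources", []):
    print(res["name"], res["format"], res["url"])

package.get is used so the script degrades gracefully if the portal's schema does not expose these custom fields at the top level of the package_show result.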