Changes
On December 16, 2024 at 7:10:49 PM UTC, admin:
-
Changed value of field
doi_status
to True
in Visual Context-Aware Convolution Filters for Transformation-Invariant Neural Networks -
Changed value of field
doi_date_published
to 2024-12-16
in Visual Context-Aware Convolution Filters for Transformation-Invariant Neural Networks -
Added resource Original Metadata to Visual Context-Aware Convolution Filters for Transformation-Invariant Neural Networks
f | 1 | { | f | 1 | { |
2 | "access_rights": "", | 2 | "access_rights": "", | ||
3 | "author": "Suraj Tripathi", | 3 | "author": "Suraj Tripathi", | ||
4 | "author_email": "", | 4 | "author_email": "", | ||
5 | "citation": [], | 5 | "citation": [], | ||
6 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | 6 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | ||
7 | "defined_in": "https://doi.org/10.48550/arXiv.1906.09986", | 7 | "defined_in": "https://doi.org/10.48550/arXiv.1906.09986", | ||
8 | "doi": "10.57702/1d8ex6mq", | 8 | "doi": "10.57702/1d8ex6mq", | ||
n | 9 | "doi_date_published": null, | n | 9 | "doi_date_published": "2024-12-16", |
10 | "doi_publisher": "TIB", | 10 | "doi_publisher": "TIB", | ||
n | 11 | "doi_status": false, | n | 11 | "doi_status": true, |
12 | "domain": "https://service.tib.eu/ldmservice", | 12 | "domain": "https://service.tib.eu/ldmservice", | ||
13 | "extra_authors": [ | 13 | "extra_authors": [ | ||
14 | { | 14 | { | ||
15 | "extra_author": "Abhay Kumar", | 15 | "extra_author": "Abhay Kumar", | ||
16 | "orcid": "" | 16 | "orcid": "" | ||
17 | }, | 17 | }, | ||
18 | { | 18 | { | ||
19 | "extra_author": "Chirag Singh", | 19 | "extra_author": "Chirag Singh", | ||
20 | "orcid": "" | 20 | "orcid": "" | ||
21 | } | 21 | } | ||
22 | ], | 22 | ], | ||
23 | "groups": [ | 23 | "groups": [ | ||
24 | { | 24 | { | ||
25 | "description": "", | 25 | "description": "", | ||
26 | "display_name": "Computer Vision", | 26 | "display_name": "Computer Vision", | ||
27 | "id": "d09caf7c-26c7-4e4d-bb8e-49476a90ba25", | 27 | "id": "d09caf7c-26c7-4e4d-bb8e-49476a90ba25", | ||
28 | "image_display_url": "", | 28 | "image_display_url": "", | ||
29 | "name": "computer-vision", | 29 | "name": "computer-vision", | ||
30 | "title": "Computer Vision" | 30 | "title": "Computer Vision" | ||
31 | }, | 31 | }, | ||
32 | { | 32 | { | ||
33 | "description": "", | 33 | "description": "", | ||
34 | "display_name": "Deep Learning", | 34 | "display_name": "Deep Learning", | ||
35 | "id": "d2734132-7098-4cc5-9f4c-5f9b6e1d7922", | 35 | "id": "d2734132-7098-4cc5-9f4c-5f9b6e1d7922", | ||
36 | "image_display_url": "", | 36 | "image_display_url": "", | ||
37 | "name": "deep-learning", | 37 | "name": "deep-learning", | ||
38 | "title": "Deep Learning" | 38 | "title": "Deep Learning" | ||
39 | } | 39 | } | ||
40 | ], | 40 | ], | ||
41 | "id": "9ea25542-c2fe-4d77-8d87-014c48d4ae6a", | 41 | "id": "9ea25542-c2fe-4d77-8d87-014c48d4ae6a", | ||
42 | "isopen": false, | 42 | "isopen": false, | ||
43 | "landing_page": "", | 43 | "landing_page": "", | ||
44 | "license_title": null, | 44 | "license_title": null, | ||
45 | "link_orkg": "", | 45 | "link_orkg": "", | ||
46 | "metadata_created": "2024-12-16T19:10:47.814745", | 46 | "metadata_created": "2024-12-16T19:10:47.814745", | ||
n | 47 | "metadata_modified": "2024-12-16T19:10:47.814752", | n | 47 | "metadata_modified": "2024-12-16T19:10:48.329047", |
48 | "name": | 48 | "name": | ||
49 | are-convolution-filters-for-transformation-invariant-neural-networks", | 49 | are-convolution-filters-for-transformation-invariant-neural-networks", | ||
50 | "notes": "The proposed framework generates a unique set of | 50 | "notes": "The proposed framework generates a unique set of | ||
51 | context-dependent filters based on the input image, and combines them | 51 | context-dependent filters based on the input image, and combines them | ||
52 | with max-pooling to produce transformation-invariant feature | 52 | with max-pooling to produce transformation-invariant feature | ||
53 | representations.", | 53 | representations.", | ||
n | 54 | "num_resources": 0, | n | 54 | "num_resources": 1, |
55 | "num_tags": 3, | 55 | "num_tags": 3, | ||
56 | "organization": { | 56 | "organization": { | ||
57 | "approval_status": "approved", | 57 | "approval_status": "approved", | ||
58 | "created": "2024-11-25T12:11:38.292601", | 58 | "created": "2024-11-25T12:11:38.292601", | ||
59 | "description": "", | 59 | "description": "", | ||
60 | "id": "079d46db-32df-4b48-91f3-0a8bc8f69559", | 60 | "id": "079d46db-32df-4b48-91f3-0a8bc8f69559", | ||
61 | "image_url": "", | 61 | "image_url": "", | ||
62 | "is_organization": true, | 62 | "is_organization": true, | ||
63 | "name": "no-organization", | 63 | "name": "no-organization", | ||
64 | "state": "active", | 64 | "state": "active", | ||
65 | "title": "No Organization", | 65 | "title": "No Organization", | ||
66 | "type": "organization" | 66 | "type": "organization" | ||
67 | }, | 67 | }, | ||
68 | "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559", | 68 | "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559", | ||
69 | "private": false, | 69 | "private": false, | ||
70 | "relationships_as_object": [], | 70 | "relationships_as_object": [], | ||
71 | "relationships_as_subject": [], | 71 | "relationships_as_subject": [], | ||
t | 72 | "resources": [], | t | 72 | "resources": [ |
73 | { | ||||
74 | "cache_last_updated": null, | ||||
75 | "cache_url": null, | ||||
76 | "created": "2024-12-16T18:25:39", | ||||
77 | "data": [ | ||||
78 | "dcterms:title", | ||||
79 | "dcterms:accessRights", | ||||
80 | "dcterms:creator", | ||||
81 | "dcterms:description", | ||||
82 | "dcterms:issued", | ||||
83 | "dcterms:language", | ||||
84 | "dcterms:identifier", | ||||
85 | "dcat:theme", | ||||
86 | "dcterms:type", | ||||
87 | "dcat:keyword", | ||||
88 | "dcat:landingPage", | ||||
89 | "dcterms:hasVersion", | ||||
90 | "dcterms:format", | ||||
91 | "mls:task", | ||||
92 | "datacite:isDescribedBy" | ||||
93 | ], | ||||
94 | "description": "The json representation of the dataset with its | ||||
95 | distributions based on DCAT.", | ||||
96 | "format": "JSON", | ||||
97 | "hash": "", | ||||
98 | "id": "353716b3-4877-4618-a835-07125b82e6d4", | ||||
99 | "last_modified": "2024-12-16T19:10:48.321309", | ||||
100 | "metadata_modified": "2024-12-16T19:10:48.332006", | ||||
101 | "mimetype": "application/json", | ||||
102 | "mimetype_inner": null, | ||||
103 | "name": "Original Metadata", | ||||
104 | "package_id": "9ea25542-c2fe-4d77-8d87-014c48d4ae6a", | ||||
105 | "position": 0, | ||||
106 | "resource_type": null, | ||||
107 | "size": 878, | ||||
108 | "state": "active", | ||||
109 | "url": | ||||
110 | resource/353716b3-4877-4618-a835-07125b82e6d4/download/metadata.json", | ||||
111 | "url_type": "upload" | ||||
112 | } | ||||
113 | ], | ||||
73 | "services_used_list": "", | 114 | "services_used_list": "", | ||
74 | "state": "active", | 115 | "state": "active", | ||
75 | "tags": [ | 116 | "tags": [ | ||
76 | { | 117 | { | ||
77 | "display_name": "Context-Aware", | 118 | "display_name": "Context-Aware", | ||
78 | "id": "78d7d8d1-2244-4c15-b965-88c753188aed", | 119 | "id": "78d7d8d1-2244-4c15-b965-88c753188aed", | ||
79 | "name": "Context-Aware", | 120 | "name": "Context-Aware", | ||
80 | "state": "active", | 121 | "state": "active", | ||
81 | "vocabulary_id": null | 122 | "vocabulary_id": null | ||
82 | }, | 123 | }, | ||
83 | { | 124 | { | ||
84 | "display_name": "Convolutional Neural Networks", | 125 | "display_name": "Convolutional Neural Networks", | ||
85 | "id": "a167095b-f2a5-4c27-8f27-c413611cc4ee", | 126 | "id": "a167095b-f2a5-4c27-8f27-c413611cc4ee", | ||
86 | "name": "Convolutional Neural Networks", | 127 | "name": "Convolutional Neural Networks", | ||
87 | "state": "active", | 128 | "state": "active", | ||
88 | "vocabulary_id": null | 129 | "vocabulary_id": null | ||
89 | }, | 130 | }, | ||
90 | { | 131 | { | ||
91 | "display_name": "Transformation-Invariant", | 132 | "display_name": "Transformation-Invariant", | ||
92 | "id": "e48bca05-cc9b-4e64-a116-923fb4981889", | 133 | "id": "e48bca05-cc9b-4e64-a116-923fb4981889", | ||
93 | "name": "Transformation-Invariant", | 134 | "name": "Transformation-Invariant", | ||
94 | "state": "active", | 135 | "state": "active", | ||
95 | "vocabulary_id": null | 136 | "vocabulary_id": null | ||
96 | } | 137 | } | ||
97 | ], | 138 | ], | ||
98 | "title": "Visual Context-Aware Convolution Filters for | 139 | "title": "Visual Context-Aware Convolution Filters for | ||
99 | Transformation-Invariant Neural Networks", | 140 | Transformation-Invariant Neural Networks", | ||
100 | "type": "dataset", | 141 | "type": "dataset", | ||
101 | "version": "" | 142 | "version": "" | ||
102 | } | 143 | } |