Changes
On December 16, 2024 at 7:52:41 PM UTC, admin:
-
Changed value of field
doi_status
to True
in Uni3DL: Unified Model for 3D and Language Understanding -
Changed value of field
doi_date_published
to 2024-12-16
in Uni3DL: Unified Model for 3D and Language Understanding -
Added resource Original Metadata to Uni3DL: Unified Model for 3D and Language Understanding
f | 1 | { | f | 1 | { |
2 | "access_rights": "", | 2 | "access_rights": "", | ||
3 | "author": "Xiang Li", | 3 | "author": "Xiang Li", | ||
4 | "author_email": "", | 4 | "author_email": "", | ||
5 | "citation": [], | 5 | "citation": [], | ||
6 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | 6 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | ||
7 | "defined_in": "https://doi.org/10.48550/arXiv.2312.03026", | 7 | "defined_in": "https://doi.org/10.48550/arXiv.2312.03026", | ||
8 | "doi": "10.57702/qhrzeun0", | 8 | "doi": "10.57702/qhrzeun0", | ||
n | 9 | "doi_date_published": null, | n | 9 | "doi_date_published": "2024-12-16", |
10 | "doi_publisher": "TIB", | 10 | "doi_publisher": "TIB", | ||
n | 11 | "doi_status": false, | n | 11 | "doi_status": true, |
12 | "domain": "https://service.tib.eu/ldmservice", | 12 | "domain": "https://service.tib.eu/ldmservice", | ||
13 | "extra_authors": [ | 13 | "extra_authors": [ | ||
14 | { | 14 | { | ||
15 | "extra_author": "Jian Ding", | 15 | "extra_author": "Jian Ding", | ||
16 | "orcid": "" | 16 | "orcid": "" | ||
17 | }, | 17 | }, | ||
18 | { | 18 | { | ||
19 | "extra_author": "Zhaoyang Chen", | 19 | "extra_author": "Zhaoyang Chen", | ||
20 | "orcid": "" | 20 | "orcid": "" | ||
21 | }, | 21 | }, | ||
22 | { | 22 | { | ||
23 | "extra_author": "Mohamed Elhoseiny", | 23 | "extra_author": "Mohamed Elhoseiny", | ||
24 | "orcid": "" | 24 | "orcid": "" | ||
25 | } | 25 | } | ||
26 | ], | 26 | ], | ||
27 | "groups": [ | 27 | "groups": [ | ||
28 | { | 28 | { | ||
29 | "description": "", | 29 | "description": "", | ||
30 | "display_name": "3D Vision-Language Understanding", | 30 | "display_name": "3D Vision-Language Understanding", | ||
31 | "id": "0dc1fa37-a62e-418b-a54b-4638d7da3c03", | 31 | "id": "0dc1fa37-a62e-418b-a54b-4638d7da3c03", | ||
32 | "image_display_url": "", | 32 | "image_display_url": "", | ||
33 | "name": "3d-vision-language-understanding", | 33 | "name": "3d-vision-language-understanding", | ||
34 | "title": "3D Vision-Language Understanding" | 34 | "title": "3D Vision-Language Understanding" | ||
35 | } | 35 | } | ||
36 | ], | 36 | ], | ||
37 | "id": "b3e95ca9-425b-4f78-b6c5-f065af4caa53", | 37 | "id": "b3e95ca9-425b-4f78-b6c5-f065af4caa53", | ||
38 | "isopen": false, | 38 | "isopen": false, | ||
39 | "landing_page": "https://uni3dl.github.io/", | 39 | "landing_page": "https://uni3dl.github.io/", | ||
40 | "license_title": null, | 40 | "license_title": null, | ||
41 | "link_orkg": "", | 41 | "link_orkg": "", | ||
42 | "metadata_created": "2024-12-16T19:52:40.516457", | 42 | "metadata_created": "2024-12-16T19:52:40.516457", | ||
n | 43 | "metadata_modified": "2024-12-16T19:52:40.516464", | n | 43 | "metadata_modified": "2024-12-16T19:52:40.898662", |
44 | "name": "uni3dl--unified-model-for-3d-and-language-understanding", | 44 | "name": "uni3dl--unified-model-for-3d-and-language-understanding", | ||
45 | "notes": "Uni3DL is a unified model for 3D and language | 45 | "notes": "Uni3DL is a unified model for 3D and language | ||
46 | understanding. It operates directly on point clouds and supports | 46 | understanding. It operates directly on point clouds and supports | ||
47 | diverse 3D vision-language tasks, including semantic segmentation, | 47 | diverse 3D vision-language tasks, including semantic segmentation, | ||
48 | object detection, instance segmentation, grounded segmentation, | 48 | object detection, instance segmentation, grounded segmentation, | ||
49 | captioning, text-3D cross-modality retrieval, and zero-shot 3D object | 49 | captioning, text-3D cross-modality retrieval, and zero-shot 3D object | ||
50 | classification.", | 50 | classification.", | ||
n | 51 | "num_resources": 0, | n | 51 | "num_resources": 1, |
52 | "num_tags": 8, | 52 | "num_tags": 8, | ||
53 | "organization": { | 53 | "organization": { | ||
54 | "approval_status": "approved", | 54 | "approval_status": "approved", | ||
55 | "created": "2024-11-25T12:11:38.292601", | 55 | "created": "2024-11-25T12:11:38.292601", | ||
56 | "description": "", | 56 | "description": "", | ||
57 | "id": "079d46db-32df-4b48-91f3-0a8bc8f69559", | 57 | "id": "079d46db-32df-4b48-91f3-0a8bc8f69559", | ||
58 | "image_url": "", | 58 | "image_url": "", | ||
59 | "is_organization": true, | 59 | "is_organization": true, | ||
60 | "name": "no-organization", | 60 | "name": "no-organization", | ||
61 | "state": "active", | 61 | "state": "active", | ||
62 | "title": "No Organization", | 62 | "title": "No Organization", | ||
63 | "type": "organization" | 63 | "type": "organization" | ||
64 | }, | 64 | }, | ||
65 | "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559", | 65 | "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559", | ||
66 | "private": false, | 66 | "private": false, | ||
67 | "relationships_as_object": [], | 67 | "relationships_as_object": [], | ||
68 | "relationships_as_subject": [], | 68 | "relationships_as_subject": [], | ||
t | 69 | "resources": [], | t | 69 | "resources": [ |
70 | { | ||||
71 | "cache_last_updated": null, | ||||
72 | "cache_url": null, | ||||
73 | "created": "2024-12-16T18:25:43", | ||||
74 | "data": [ | ||||
75 | "dcterms:title", | ||||
76 | "dcterms:accessRights", | ||||
77 | "dcterms:creator", | ||||
78 | "dcterms:description", | ||||
79 | "dcterms:issued", | ||||
80 | "dcterms:language", | ||||
81 | "dcterms:identifier", | ||||
82 | "dcat:theme", | ||||
83 | "dcterms:type", | ||||
84 | "dcat:keyword", | ||||
85 | "dcat:landingPage", | ||||
86 | "dcterms:hasVersion", | ||||
87 | "dcterms:format", | ||||
88 | "mls:task", | ||||
89 | "datacite:isDescribedBy" | ||||
90 | ], | ||||
91 | "description": "The json representation of the dataset with its | ||||
92 | distributions based on DCAT.", | ||||
93 | "format": "JSON", | ||||
94 | "hash": "", | ||||
95 | "id": "eaa1a2e4-fb7d-4a7c-81a2-ac5c137f2542", | ||||
96 | "last_modified": "2024-12-16T19:52:40.891216", | ||||
97 | "metadata_modified": "2024-12-16T19:52:40.901468", | ||||
98 | "mimetype": "application/json", | ||||
99 | "mimetype_inner": null, | ||||
100 | "name": "Original Metadata", | ||||
101 | "package_id": "b3e95ca9-425b-4f78-b6c5-f065af4caa53", | ||||
102 | "position": 0, | ||||
103 | "resource_type": null, | ||||
104 | "size": 1238, | ||||
105 | "state": "active", | ||||
106 | "url": | ||||
107 | resource/eaa1a2e4-fb7d-4a7c-81a2-ac5c137f2542/download/metadata.json", | ||||
108 | "url_type": "upload" | ||||
109 | } | ||||
110 | ], | ||||
70 | "services_used_list": "", | 111 | "services_used_list": "", | ||
71 | "state": "active", | 112 | "state": "active", | ||
72 | "tags": [ | 113 | "tags": [ | ||
73 | { | 114 | { | ||
74 | "display_name": "3D Vision-Language Understanding", | 115 | "display_name": "3D Vision-Language Understanding", | ||
75 | "id": "84b547c0-3915-4345-b30f-68c66bd37e11", | 116 | "id": "84b547c0-3915-4345-b30f-68c66bd37e11", | ||
76 | "name": "3D Vision-Language Understanding", | 117 | "name": "3D Vision-Language Understanding", | ||
77 | "state": "active", | 118 | "state": "active", | ||
78 | "vocabulary_id": null | 119 | "vocabulary_id": null | ||
79 | }, | 120 | }, | ||
80 | { | 121 | { | ||
81 | "display_name": "Captioning", | 122 | "display_name": "Captioning", | ||
82 | "id": "99ec7549-b23a-432e-88f4-803dd8685b2c", | 123 | "id": "99ec7549-b23a-432e-88f4-803dd8685b2c", | ||
83 | "name": "Captioning", | 124 | "name": "Captioning", | ||
84 | "state": "active", | 125 | "state": "active", | ||
85 | "vocabulary_id": null | 126 | "vocabulary_id": null | ||
86 | }, | 127 | }, | ||
87 | { | 128 | { | ||
88 | "display_name": "Grounded Segmentation", | 129 | "display_name": "Grounded Segmentation", | ||
89 | "id": "e6d505b8-e003-4c84-b164-0f0407aa3b2f", | 130 | "id": "e6d505b8-e003-4c84-b164-0f0407aa3b2f", | ||
90 | "name": "Grounded Segmentation", | 131 | "name": "Grounded Segmentation", | ||
91 | "state": "active", | 132 | "state": "active", | ||
92 | "vocabulary_id": null | 133 | "vocabulary_id": null | ||
93 | }, | 134 | }, | ||
94 | { | 135 | { | ||
95 | "display_name": "Instance Segmentation", | 136 | "display_name": "Instance Segmentation", | ||
96 | "id": "b58d8dfe-1216-401d-8a2a-ceb09e07a013", | 137 | "id": "b58d8dfe-1216-401d-8a2a-ceb09e07a013", | ||
97 | "name": "Instance Segmentation", | 138 | "name": "Instance Segmentation", | ||
98 | "state": "active", | 139 | "state": "active", | ||
99 | "vocabulary_id": null | 140 | "vocabulary_id": null | ||
100 | }, | 141 | }, | ||
101 | { | 142 | { | ||
102 | "display_name": "Object Detection", | 143 | "display_name": "Object Detection", | ||
103 | "id": "44adc011-570b-46cf-9a65-ab72ca690477", | 144 | "id": "44adc011-570b-46cf-9a65-ab72ca690477", | ||
104 | "name": "Object Detection", | 145 | "name": "Object Detection", | ||
105 | "state": "active", | 146 | "state": "active", | ||
106 | "vocabulary_id": null | 147 | "vocabulary_id": null | ||
107 | }, | 148 | }, | ||
108 | { | 149 | { | ||
109 | "display_name": "Point Clouds", | 150 | "display_name": "Point Clouds", | ||
110 | "id": "cd5e5c68-6deb-4e83-b810-0756f3961446", | 151 | "id": "cd5e5c68-6deb-4e83-b810-0756f3961446", | ||
111 | "name": "Point Clouds", | 152 | "name": "Point Clouds", | ||
112 | "state": "active", | 153 | "state": "active", | ||
113 | "vocabulary_id": null | 154 | "vocabulary_id": null | ||
114 | }, | 155 | }, | ||
115 | { | 156 | { | ||
116 | "display_name": "Semantic Segmentation", | 157 | "display_name": "Semantic Segmentation", | ||
117 | "id": "809ad6af-28cd-43bd-974d-055a5c0f2973", | 158 | "id": "809ad6af-28cd-43bd-974d-055a5c0f2973", | ||
118 | "name": "Semantic Segmentation", | 159 | "name": "Semantic Segmentation", | ||
119 | "state": "active", | 160 | "state": "active", | ||
120 | "vocabulary_id": null | 161 | "vocabulary_id": null | ||
121 | }, | 162 | }, | ||
122 | { | 163 | { | ||
123 | "display_name": "Text-3D Cross-Modality Retrieval", | 164 | "display_name": "Text-3D Cross-Modality Retrieval", | ||
124 | "id": "e6be53dd-1ed9-467f-9590-f6cb1b783bb3", | 165 | "id": "e6be53dd-1ed9-467f-9590-f6cb1b783bb3", | ||
125 | "name": "Text-3D Cross-Modality Retrieval", | 166 | "name": "Text-3D Cross-Modality Retrieval", | ||
126 | "state": "active", | 167 | "state": "active", | ||
127 | "vocabulary_id": null | 168 | "vocabulary_id": null | ||
128 | } | 169 | } | ||
129 | ], | 170 | ], | ||
130 | "title": "Uni3DL: Unified Model for 3D and Language Understanding", | 171 | "title": "Uni3DL: Unified Model for 3D and Language Understanding", | ||
131 | "type": "dataset", | 172 | "type": "dataset", | ||
132 | "version": "" | 173 | "version": "" | ||
133 | } | 174 | } |