--- nyuv2-dataset (metadata_modified 2024-12-02T22:47:43.129229)
+++ nyuv2-dataset (metadata_modified 2024-12-03T09:38:48.410739)
 {
   "access_rights": "",
-  "author": "Xuehao Wang",
+  "author": "Xinni Jiang",
   "author_email": "",
   "citation": [
-    "https://doi.org/10.48550/arXiv.2403.10971"
+    "https://doi.org/10.48550/arXiv.2401.08123",
+    "https://doi.org/10.48550/arXiv.1803.02784",
+    "https://doi.org/10.1109/LRA.2024.3416788",
+    "https://doi.org/10.48550/arXiv.2203.09168"
   ],
   "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700",
-  "defined_in": "https://doi.org/10.1007/978-3-030-01225-0_11",
+  "defined_in": "https://doi.org/10.48550/arXiv.1905.07005",
   "doi": "10.57702/2lkei111",
   "doi_date_published": "2024-12-02",
   "doi_publisher": "TIB",
   "doi_status": true,
   "domain": "https://service.tib.eu/ldmservice",
   "extra_authors": [
     {
-      "extra_author": "Feiyang Ye",
+      "extra_author": "Zengsheng Kuang",
       "orcid": ""
     },
     {
-      "extra_author": "Yu Zhang",
+      "extra_author": "Chunle Guo",
+      "orcid": ""
+    },
+    {
+      "extra_author": "Ruixun Zhang",
+      "orcid": ""
+    },
+    {
+      "extra_author": "Lei Cai",
+      "orcid": ""
+    },
+    {
+      "extra_author": "Xiao Fan",
+      "orcid": ""
+    },
+    {
+      "extra_author": "Chongyi Li",
       "orcid": ""
     }
   ],
   "groups": [
+    {
+      "description": "",
+      "display_name": "3D Object Recognition",
+      "id": "fb966b89-2ab8-4ac3-ae8e-55e18c4de9e1",
+      "image_display_url": "",
+      "name": "3d-object-recognition",
+      "title": "3D Object Recognition"
+    },
+    {
+      "description": "",
+      "display_name": "Computer Vision",
+      "id": "d09caf7c-26c7-4e4d-bb8e-49476a90ba25",
+      "image_display_url": "",
+      "name": "computer-vision",
+      "title": "Computer Vision"
+    },
     {
       "description": "",
       "display_name": "Depth Estimation",
       "id": "1cfc3f7a-9b2f-4ee9-9d15-9883618b3218",
       "image_display_url": "",
       "name": "depth-estimation",
       "title": "Depth Estimation"
     },
     {
       "description": "",
-      "display_name": "Image Segmentation",
-      "id": "7c8cc5f1-a9b2-4924-82ec-9e3aa3049a04",
+      "display_name": "Depth Super-Resolution",
+      "id": "3057a183-0c40-46a3-a1e9-bb6ceab6e21c",
+      "image_display_url": "",
+      "name": "depth-super-resolution",
+      "title": "Depth Super-Resolution"
+    },
+    {
+      "description": "",
+      "display_name": "Semantic Segmentation",
+      "id": "8c3f2eee-f5f9-464d-9c0a-1a5e7a925c0e",
       "image_display_url": "",
-      "name": "image-segmentation",
-      "title": "Image Segmentation"
+      "name": "semantic-segmentation",
+      "title": "Semantic Segmentation"
     },
     {
       "description": "",
-      "display_name": "Indoor Scene Understanding",
-      "id": "6f671b4b-0b4d-4fe1-9769-e31ab164e205",
+      "display_name": "Visual Odometry",
+      "id": "27ff25a9-e176-4b6d-8035-cc6c964cd212",
       "image_display_url": "",
-      "name": "indoor-scene-understanding",
-      "title": "Indoor Scene Understanding"
+      "name": "visual-odometry",
+      "title": "Visual Odometry"
     }
   ],
   "id": "81ad4f95-0d38-4f88-8f1f-0fa9a6976264",
   "isopen": false,
-  "landing_page": "https://www.cs.cmu.edu/~ronny/nyu-depth-v2/",
+  "landing_page": "https://www.cs.nyu.edu/~silberman/datasets.html",
   "license_title": null,
   "link_orkg": "",
   "metadata_created": "2024-12-02T22:47:42.528951",
-  "metadata_modified": "2024-12-02T22:47:43.129229",
+  "metadata_modified": "2024-12-03T09:38:48.410739",
   "name": "nyuv2-dataset",
-  "notes": "The NYUv2 dataset consists of video sequences of various indoor scenes recorded by RGB and Depth cameras in Microsoft Kinect. It contains 1,449 images with ground truth, where 795 images are for training and 654 images are for validation.",
-  "num_resources": 1,
-  "num_tags": 8,
+  "notes": "The NYUv2 dataset is a large-scale dataset for 3D object recognition and semantic segmentation. It contains 206 test set video sequences with 135 classes.",
+  "num_resources": 0,
+  "num_tags": 13,
   "organization": {
     "approval_status": "approved",
     "created": "2024-11-25T12:11:38.292601",
     "description": "",
     "id": "079d46db-32df-4b48-91f3-0a8bc8f69559",
     "image_url": "",
     "is_organization": true,
     "name": "no-organization",
     "state": "active",
     "title": "No Organization",
     "type": "organization"
   },
   "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559",
   "private": false,
   "relationships_as_object": [],
   "relationships_as_subject": [],
-  "resources": [
-    {
-      "cache_last_updated": null,
-      "cache_url": null,
-      "created": "2024-12-02T22:29:38",
-      "data": [
-        "dcterms:title",
-        "dcterms:accessRights",
-        "dcterms:creator",
-        "dcterms:description",
-        "dcterms:issued",
-        "dcterms:language",
-        "dcterms:identifier",
-        "dcat:theme",
-        "dcterms:type",
-        "dcat:keyword",
-        "dcat:landingPage",
-        "dcterms:hasVersion",
-        "dcterms:format",
-        "mls:task",
-        "datacite:isDescribedBy"
-      ],
-      "description": "The json representation of the dataset with its distributions based on DCAT.",
-      "format": "JSON",
-      "hash": "",
-      "id": "bf9a6d90-e84d-42c9-a0d5-2cb3a76069d2",
-      "last_modified": "2024-12-02T22:47:43.121532",
-      "metadata_modified": "2024-12-02T22:47:43.131977",
-      "mimetype": "application/json",
-      "mimetype_inner": null,
-      "name": "Original Metadata",
-      "package_id": "81ad4f95-0d38-4f88-8f1f-0fa9a6976264",
-      "position": 0,
-      "resource_type": null,
-      "size": 1090,
-      "state": "active",
-      "url": resource/bf9a6d90-e84d-42c9-a0d5-2cb3a76069d2/download/metadata.json",
-      "url_type": "upload"
-    }
-  ],
+  "resources": [],
   "services_used_list": "",
   "state": "active",
   "tags": [
     {
+      "display_name": "3D object recognition",
+      "id": "0aa48b0c-9c1b-4a02-b164-e5c1c70077f1",
+      "name": "3D object recognition",
+      "state": "active",
+      "vocabulary_id": null
+    },
+    {
+      "display_name": "Coded Visual Odometry",
+      "id": "fde6b232-a3ca-4e60-8ea5-cba7dabe29e5",
+      "name": "Coded Visual Odometry",
+      "state": "active",
+      "vocabulary_id": null
+    },
+    {
       "display_name": "Depth Estimation",
       "id": "559df65c-baac-4373-9e7e-6732051e61ec",
       "name": "Depth Estimation",
       "state": "active",
       "vocabulary_id": null
     },
     {
-      "display_name": "Depth Map",
-      "id": "6e4c85d9-46cd-4c68-96ef-5a2c1e9502b8",
-      "name": "Depth Map",
-      "state": "active",
-      "vocabulary_id": null
-    },
-    {
-      "display_name": "Indoor Scene Understanding",
-      "id": "a8ea8f4b-57e4-4976-8014-1efd58e75334",
-      "name": "Indoor Scene Understanding",
-      "state": "active",
-      "vocabulary_id": null
-    },
-    {
       "display_name": "NYUv2",
       "id": "f38a103e-ef81-4fc7-a558-3de4df053d06",
       "name": "NYUv2",
       "state": "active",
       "vocabulary_id": null
     },
     {
-      "display_name": "RGB",
-      "id": "ddaf0f57-853b-479c-92e3-64fd48ad8b68",
-      "name": "RGB",
+      "display_name": "RGB image",
+      "id": "e9b1c684-69af-491f-b582-0898d325720b",
+      "name": "RGB image",
       "state": "active",
       "vocabulary_id": null
     },
     {
-      "display_name": "RGB-D",
-      "id": "b4388a5d-d31a-45b3-bd20-55ea8c48a7be",
-      "name": "RGB-D",
+      "display_name": "Visual Odometry",
+      "id": "ce8d51cd-12ca-4bca-b2bc-7d8cab580b7a",
+      "name": "Visual Odometry",
       "state": "active",
       "vocabulary_id": null
     },
     {
       "display_name": "depth estimation",
       "id": "3c08a798-cec3-4682-a668-4f95d6d8ad18",
       "name": "depth estimation",
       "state": "active",
       "vocabulary_id": null
     },
     {
-      "display_name": "image segmentation",
-      "id": "7eaed78e-c73a-4929-a8c9-60265069f59a",
-      "name": "image segmentation",
+      "display_name": "depth map",
+      "id": "47c36730-c402-451a-bec4-d1194996c398",
+      "name": "depth map",
+      "state": "active",
+      "vocabulary_id": null
+    },
+    {
+      "display_name": "depth regression",
+      "id": "fda0dcda-73b8-42af-a937-76169d52cecc",
+      "name": "depth regression",
+      "state": "active",
+      "vocabulary_id": null
+    },
+    {
+      "display_name": "depth super-resolution",
+      "id": "1315140d-d7b1-47b3-8df9-0dc5f1ca2f64",
+      "name": "depth super-resolution",
+      "state": "active",
+      "vocabulary_id": null
+    },
+    {
+      "display_name": "monocular depth estimation",
+      "id": "b343d8bd-6834-4c6b-a622-fa7a08feacfd",
+      "name": "monocular depth estimation",
+      "state": "active",
+      "vocabulary_id": null
+    },
+    {
+      "display_name": "semantic segmentation",
+      "id": "f9237911-e9df-4dd5-a9aa-301b6d4969af",
+      "name": "semantic segmentation",
+      "state": "active",
+      "vocabulary_id": null
+    },
+    {
+      "display_name": "single image",
+      "id": "a388f511-56bd-4b53-b3fb-011c750e5a38",
+      "name": "single image",
       "state": "active",
       "vocabulary_id": null
     }
   ],
-  "title": "NYUv2 Dataset",
+  "title": "NYUv2 dataset",
   "type": "dataset",
   "version": ""
 }
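For context, the record compared above follows the CKAN package schema (name, groups, tags, resources, organization), and the "domain" field points at https://service.tib.eu/ldmservice. A minimal Python sketch, assuming that service exposes the standard CKAN action API (/api/3/action/package_show) for anonymous reads, for fetching the current revision of this package and printing the fields touched by the diff:

import json
import urllib.parse
import urllib.request

# Assumed endpoint: the standard CKAN action API on the service named in "domain".
BASE = "https://service.tib.eu/ldmservice/api/3/action/package_show"

def package_show(name_or_id: str) -> dict:
    """Fetch a CKAN package record (assumes anonymous read access is permitted)."""
    url = BASE + "?" + urllib.parse.urlencode({"id": name_or_id})
    with urllib.request.urlopen(url) as resp:
        payload = json.load(resp)
    if not payload.get("success"):
        raise RuntimeError(f"package_show failed for {name_or_id!r}")
    return payload["result"]

if __name__ == "__main__":
    pkg = package_show("nyuv2-dataset")
    # Fields changed between the two revisions shown in the diff.
    print("title:   ", pkg["title"])
    print("author:  ", pkg["author"])
    print("groups:  ", [g["name"] for g in pkg["groups"]])
    print("tags:    ", [t["name"] for t in pkg["tags"]])
    print("citation:", pkg.get("citation"))

Note that package_show only returns the current state of the record; the old values on the left-hand side of the diff come from the earlier metadata revision and are not reproduced by this call.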