Changes
On August 4, 2023 at 8:51:54 AM UTC, admin:
-
No user-editable fields were updated; only the system `metadata_modified` timestamp changed. See the metadata diff below for details.
f | 1 | { | f | 1 | { |
2 | "author": "Schlagenhauf, Tobias", | 2 | "author": "Schlagenhauf, Tobias", | ||
3 | "author_email": "", | 3 | "author_email": "", | ||
4 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | 4 | "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700", | ||
5 | "doi": "10.35097/1278", | 5 | "doi": "10.35097/1278", | ||
6 | "doi_date_published": "2023", | 6 | "doi_date_published": "2023", | ||
7 | "doi_publisher": "", | 7 | "doi_publisher": "", | ||
8 | "doi_status": "True", | 8 | "doi_status": "True", | ||
9 | "extra_authors": [ | 9 | "extra_authors": [ | ||
10 | { | 10 | { | ||
11 | "extra_author": "Landwehr, Magnus", | 11 | "extra_author": "Landwehr, Magnus", | ||
12 | "orcid": "" | 12 | "orcid": "" | ||
13 | }, | 13 | }, | ||
14 | { | 14 | { | ||
15 | "extra_author": "Fleischer, J\u00fcrgen", | 15 | "extra_author": "Fleischer, J\u00fcrgen", | ||
16 | "orcid": "" | 16 | "orcid": "" | ||
17 | } | 17 | } | ||
18 | ], | 18 | ], | ||
19 | "groups": [], | 19 | "groups": [], | ||
20 | "id": "89a11bb7-29de-4671-b571-da5645f6fcc9", | 20 | "id": "89a11bb7-29de-4671-b571-da5645f6fcc9", | ||
21 | "isopen": false, | 21 | "isopen": false, | ||
22 | "license_id": "CC BY-SA 4.0 Attribution-ShareAlike", | 22 | "license_id": "CC BY-SA 4.0 Attribution-ShareAlike", | ||
23 | "license_title": "CC BY-SA 4.0 Attribution-ShareAlike", | 23 | "license_title": "CC BY-SA 4.0 Attribution-ShareAlike", | ||
24 | "metadata_created": "2023-08-04T08:50:26.316698", | 24 | "metadata_created": "2023-08-04T08:50:26.316698", | ||
t | 25 | "metadata_modified": "2023-08-04T08:50:26.316705", | t | 25 | "metadata_modified": "2023-08-04T08:51:54.712369", |
26 | "name": "rdr-doi-10-35097-1278", | 26 | "name": "rdr-doi-10-35097-1278", | ||
27 | "notes": "Abstract: Using Machine Learning Techniques in general and | 27 | "notes": "Abstract: Using Machine Learning Techniques in general and | ||
28 | Deep Learning techniques in specific needs a certain amount of data | 28 | Deep Learning techniques in specific needs a certain amount of data | ||
29 | often not available in large quantities in some technical domains. The | 29 | often not available in large quantities in some technical domains. The | ||
30 | manual inspection of Machine Tool Components, as well as the manual | 30 | manual inspection of Machine Tool Components, as well as the manual | ||
31 | end of line check of products, are labour intensive tasks in | 31 | end of line check of products, are labour intensive tasks in | ||
32 | industrial applications that often want to be automated by companies. | 32 | industrial applications that often want to be automated by companies. | ||
33 | To automate the classification processes and to develop reliable and | 33 | To automate the classification processes and to develop reliable and | ||
34 | robust Machine Learning based classification and wear prognostics | 34 | robust Machine Learning based classification and wear prognostics | ||
35 | models there is a need for real-world datasets to train and test | 35 | models there is a need for real-world datasets to train and test | ||
36 | models on.\r\nTechnicalRemarks: The dataset contains 1104 channel 3 | 36 | models on.\r\nTechnicalRemarks: The dataset contains 1104 channel 3 | ||
37 | images with 394 image-annotations for the surface damage type | 37 | images with 394 image-annotations for the surface damage type | ||
38 | \u201cpitting\u201d. The annotations made with the annotation tool | 38 | \u201cpitting\u201d. The annotations made with the annotation tool | ||
39 | labelme, are available in JSON format and hence convertible to VOC and | 39 | labelme, are available in JSON format and hence convertible to VOC and | ||
40 | COCO format. All images come from two BSD types. The dataset available | 40 | COCO format. All images come from two BSD types. The dataset available | ||
41 | for download is divided into two folders, data with all images as | 41 | for download is divided into two folders, data with all images as | ||
42 | JPEG, label with all annotations, and saved_model with a baseline | 42 | JPEG, label with all annotations, and saved_model with a baseline | ||
43 | model. The authors also provide a python script to divide the data and | 43 | model. The authors also provide a python script to divide the data and | ||
44 | labels into three different split types \u2013 train_test_split, which | 44 | labels into three different split types \u2013 train_test_split, which | ||
45 | splits images into the same train and test data-split the authors used | 45 | splits images into the same train and test data-split the authors used | ||
46 | for the baseline model, wear_dev_split, which creates all 27 wear | 46 | for the baseline model, wear_dev_split, which creates all 27 wear | ||
47 | developments and type_split, which splits the data into the occurring | 47 | developments and type_split, which splits the data into the occurring | ||
48 | BSD-types.\r\nOne of the two mentioned BSD types is represented with | 48 | BSD-types.\r\nOne of the two mentioned BSD types is represented with | ||
49 | 69 images and 55 different image-sizes. All images with this BSD type | 49 | 69 images and 55 different image-sizes. All images with this BSD type | ||
50 | come either in a clean or soiled condition.\r\nThe other BSD type is | 50 | come either in a clean or soiled condition.\r\nThe other BSD type is | ||
51 | shown on 325 images with two image-sizes. Since all images of this | 51 | shown on 325 images with two image-sizes. Since all images of this | ||
52 | type have been taken with continuous time the degree of soiling is | 52 | type have been taken with continuous time the degree of soiling is | ||
53 | evolving.\r\nAlso, the dataset contains as above mentioned 27 pitting | 53 | evolving.\r\nAlso, the dataset contains as above mentioned 27 pitting | ||
54 | development sequences with every 69 images.\r\n\r\nInstruction dataset | 54 | development sequences with every 69 images.\r\n\r\nInstruction dataset | ||
55 | split\r\nThe authors of this dataset provide 3 types of different | 55 | split\r\nThe authors of this dataset provide 3 types of different | ||
56 | dataset splits.\r\nTo get the data split you have to run the python | 56 | dataset splits.\r\nTo get the data split you have to run the python | ||
57 | script split_dataset.py.\r\nScript inputs:\r\n\r\nsplit-type | 57 | script split_dataset.py.\r\nScript inputs:\r\n\r\nsplit-type | ||
58 | (mandatory)\r\noutput directory (mandatory)\r\nDifferent | 58 | (mandatory)\r\noutput directory (mandatory)\r\nDifferent | ||
59 | split-types:\r\ntrain_test_split: splits dataset into train and test | 59 | split-types:\r\ntrain_test_split: splits dataset into train and test | ||
60 | data (80%/20%)\r\nwear_dev_split: splits dataset into 27 | 60 | data (80%/20%)\r\nwear_dev_split: splits dataset into 27 | ||
61 | wear-developments\r\ntype_split: splits dataset into different BSD | 61 | wear-developments\r\ntype_split: splits dataset into different BSD | ||
62 | types\r\nExample:\r\nC:\\Users\\Desktop>python split_dataset.py | 62 | types\r\nExample:\r\nC:\\Users\\Desktop>python split_dataset.py | ||
63 | --split_type=train_test_split | 63 | --split_type=train_test_split | ||
64 | output_dir=BSD_split_folder\r\n\r\nResult:\r\n./BSD_slit_folder/train/ | 64 | output_dir=BSD_split_folder\r\n\r\nResult:\r\n./BSD_slit_folder/train/ | ||
65 | and ./BSD_slit_folder/test/", | 65 | and ./BSD_slit_folder/test/", | ||
66 | "num_resources": 0, | 66 | "num_resources": 0, | ||
67 | "num_tags": 8, | 67 | "num_tags": 8, | ||
68 | "orcid": "", | 68 | "orcid": "", | ||
69 | "organization": { | 69 | "organization": { | ||
70 | "approval_status": "approved", | 70 | "approval_status": "approved", | ||
71 | "created": "2023-01-12T13:30:23.238233", | 71 | "created": "2023-01-12T13:30:23.238233", | ||
72 | "description": "RADAR (Research Data Repository) is a | 72 | "description": "RADAR (Research Data Repository) is a | ||
73 | cross-disciplinary repository for archiving and publishing research | 73 | cross-disciplinary repository for archiving and publishing research | ||
74 | data from completed scientific studies and projects. The focus is on | 74 | data from completed scientific studies and projects. The focus is on | ||
75 | research data from subjects that do not yet have their own | 75 | research data from subjects that do not yet have their own | ||
76 | discipline-specific infrastructures for research data management. ", | 76 | discipline-specific infrastructures for research data management. ", | ||
77 | "id": "013c89a9-383c-4200-8baa-0f78bf1d91f9", | 77 | "id": "013c89a9-383c-4200-8baa-0f78bf1d91f9", | ||
78 | "image_url": "radar-logo.svg", | 78 | "image_url": "radar-logo.svg", | ||
79 | "is_organization": true, | 79 | "is_organization": true, | ||
80 | "name": "radar", | 80 | "name": "radar", | ||
81 | "state": "active", | 81 | "state": "active", | ||
82 | "title": "RADAR", | 82 | "title": "RADAR", | ||
83 | "type": "organization" | 83 | "type": "organization" | ||
84 | }, | 84 | }, | ||
85 | "owner_org": "013c89a9-383c-4200-8baa-0f78bf1d91f9", | 85 | "owner_org": "013c89a9-383c-4200-8baa-0f78bf1d91f9", | ||
86 | "private": false, | 86 | "private": false, | ||
87 | "production_year": "2021", | 87 | "production_year": "2021", | ||
88 | "publication_year": "2023", | 88 | "publication_year": "2023", | ||
89 | "publishers": [ | 89 | "publishers": [ | ||
90 | { | 90 | { | ||
91 | "publisher": "Karlsruhe Institute of Technology" | 91 | "publisher": "Karlsruhe Institute of Technology" | ||
92 | } | 92 | } | ||
93 | ], | 93 | ], | ||
94 | "relationships_as_object": [], | 94 | "relationships_as_object": [], | ||
95 | "relationships_as_subject": [], | 95 | "relationships_as_subject": [], | ||
96 | "repository_name": "RADAR (Research Data Repository)", | 96 | "repository_name": "RADAR (Research Data Repository)", | ||
97 | "resources": [], | 97 | "resources": [], | ||
98 | "services_used_list": "", | 98 | "services_used_list": "", | ||
99 | "source_metadata_created": "2023", | 99 | "source_metadata_created": "2023", | ||
100 | "source_metadata_modified": "", | 100 | "source_metadata_modified": "", | ||
101 | "state": "active", | 101 | "state": "active", | ||
102 | "subject_areas": [ | 102 | "subject_areas": [ | ||
103 | { | 103 | { | ||
104 | "subject_area_additional": "", | 104 | "subject_area_additional": "", | ||
105 | "subject_area_name": "Engineering" | 105 | "subject_area_name": "Engineering" | ||
106 | } | 106 | } | ||
107 | ], | 107 | ], | ||
108 | "tags": [ | 108 | "tags": [ | ||
109 | { | 109 | { | ||
110 | "display_name": "Classification", | 110 | "display_name": "Classification", | ||
111 | "id": "cc82e2f5-be18-4e27-9bd8-0cb307b8a455", | 111 | "id": "cc82e2f5-be18-4e27-9bd8-0cb307b8a455", | ||
112 | "name": "Classification", | 112 | "name": "Classification", | ||
113 | "state": "active", | 113 | "state": "active", | ||
114 | "vocabulary_id": null | 114 | "vocabulary_id": null | ||
115 | }, | 115 | }, | ||
116 | { | 116 | { | ||
117 | "display_name": "Condition Monitoring", | 117 | "display_name": "Condition Monitoring", | ||
118 | "id": "5f393dcf-13da-49e7-829c-07e281a5a3bc", | 118 | "id": "5f393dcf-13da-49e7-829c-07e281a5a3bc", | ||
119 | "name": "Condition Monitoring", | 119 | "name": "Condition Monitoring", | ||
120 | "state": "active", | 120 | "state": "active", | ||
121 | "vocabulary_id": null | 121 | "vocabulary_id": null | ||
122 | }, | 122 | }, | ||
123 | { | 123 | { | ||
124 | "display_name": "Dataset", | 124 | "display_name": "Dataset", | ||
125 | "id": "81587eb2-9569-4a4b-83c8-0e2ac78e7e3b", | 125 | "id": "81587eb2-9569-4a4b-83c8-0e2ac78e7e3b", | ||
126 | "name": "Dataset", | 126 | "name": "Dataset", | ||
127 | "state": "active", | 127 | "state": "active", | ||
128 | "vocabulary_id": null | 128 | "vocabulary_id": null | ||
129 | }, | 129 | }, | ||
130 | { | 130 | { | ||
131 | "display_name": "Deep Learning", | 131 | "display_name": "Deep Learning", | ||
132 | "id": "3feb7b21-e049-4dca-9372-0d438c483f6a", | 132 | "id": "3feb7b21-e049-4dca-9372-0d438c483f6a", | ||
133 | "name": "Deep Learning", | 133 | "name": "Deep Learning", | ||
134 | "state": "active", | 134 | "state": "active", | ||
135 | "vocabulary_id": null | 135 | "vocabulary_id": null | ||
136 | }, | 136 | }, | ||
137 | { | 137 | { | ||
138 | "display_name": "Instance Segmentation", | 138 | "display_name": "Instance Segmentation", | ||
139 | "id": "b58d8dfe-1216-401d-8a2a-ceb09e07a013", | 139 | "id": "b58d8dfe-1216-401d-8a2a-ceb09e07a013", | ||
140 | "name": "Instance Segmentation", | 140 | "name": "Instance Segmentation", | ||
141 | "state": "active", | 141 | "state": "active", | ||
142 | "vocabulary_id": null | 142 | "vocabulary_id": null | ||
143 | }, | 143 | }, | ||
144 | { | 144 | { | ||
145 | "display_name": "Machine Learning", | 145 | "display_name": "Machine Learning", | ||
146 | "id": "c4f3defc-ca48-45a9-9217-ce35bd3ed73c", | 146 | "id": "c4f3defc-ca48-45a9-9217-ce35bd3ed73c", | ||
147 | "name": "Machine Learning", | 147 | "name": "Machine Learning", | ||
148 | "state": "active", | 148 | "state": "active", | ||
149 | "vocabulary_id": null | 149 | "vocabulary_id": null | ||
150 | }, | 150 | }, | ||
151 | { | 151 | { | ||
152 | "display_name": "Object Detection", | 152 | "display_name": "Object Detection", | ||
153 | "id": "44adc011-570b-46cf-9a65-ab72ca690477", | 153 | "id": "44adc011-570b-46cf-9a65-ab72ca690477", | ||
154 | "name": "Object Detection", | 154 | "name": "Object Detection", | ||
155 | "state": "active", | 155 | "state": "active", | ||
156 | "vocabulary_id": null | 156 | "vocabulary_id": null | ||
157 | }, | 157 | }, | ||
158 | { | 158 | { | ||
159 | "display_name": "Semantic Segmentation", | 159 | "display_name": "Semantic Segmentation", | ||
160 | "id": "809ad6af-28cd-43bd-974d-055a5c0f2973", | 160 | "id": "809ad6af-28cd-43bd-974d-055a5c0f2973", | ||
161 | "name": "Semantic Segmentation", | 161 | "name": "Semantic Segmentation", | ||
162 | "state": "active", | 162 | "state": "active", | ||
163 | "vocabulary_id": null | 163 | "vocabulary_id": null | ||
164 | } | 164 | } | ||
165 | ], | 165 | ], | ||
166 | "title": "Industrial machine tool element surface defect dataset", | 166 | "title": "Industrial machine tool element surface defect dataset", | ||
167 | "type": "vdataset", | 167 | "type": "vdataset", | ||
168 | "url": "https://doi.org/10.35097/1278" | 168 | "url": "https://doi.org/10.35097/1278" | ||
169 | } | 169 | } |