Changes
On December 16, 2024 at 11:01:01 PM UTC, admin:
- Changed value of field doi_status to True in Multi-Agent Reinforcement Learning with a Hierarchy of Reward Machines
- Changed value of field doi_date_published to 2024-12-16 in Multi-Agent Reinforcement Learning with a Hierarchy of Reward Machines
- Added resource Original Metadata to Multi-Agent Reinforcement Learning with a Hierarchy of Reward Machines
Package metadata before and after the change (unified diff; "-" lines are previous values, "+" lines are updated values):

 {
   "access_rights": "",
   "author": "Xuejing Zheng",
   "author_email": "",
   "citation": [],
   "creator_user_id": "17755db4-395a-4b3b-ac09-e8e3484ca700",
   "defined_in": "https://doi.org/10.48550/arXiv.2403.07005",
   "doi": "10.57702/ho6yzwwp",
-  "doi_date_published": null,
+  "doi_date_published": "2024-12-16",
   "doi_publisher": "TIB",
-  "doi_status": false,
+  "doi_status": true,
   "domain": "https://service.tib.eu/ldmservice",
   "extra_authors": [
     {
       "extra_author": "Chao Yu",
       "orcid": ""
     }
   ],
   "groups": [
     {
       "description": "",
       "display_name": "Multi-Agent Reinforcement Learning",
       "id": "f5bc2f02-6c0c-4897-a832-ad948db63a83",
       "image_display_url": "",
       "name": "multi-agent-reinforcement-learning",
       "title": "Multi-Agent Reinforcement Learning"
     }
   ],
   "id": "71d36126-8423-4ebf-9843-08eb6c5c4b52",
   "isopen": false,
   "landing_page": "",
   "license_title": null,
   "link_orkg": "",
   "metadata_created": "2024-12-16T23:01:00.063806",
-  "metadata_modified": "2024-12-16T23:01:00.063811",
+  "metadata_modified": "2024-12-16T23:01:00.550908",
   "name": "multi-agent-reinforcement-learning-with-a-hierarchy-of-reward-machines",
   "notes": "The dataset used in the paper is a hierarchical structure of propositions, where a higher-level proposition is a temporal abstraction of lower-level propositions. Each proposition represents a subtask, which is assigned to a group of agents that coordinate to make the proposition become true.",
-  "num_resources": 0,
+  "num_resources": 1,
   "num_tags": 3,
   "organization": {
     "approval_status": "approved",
     "created": "2024-11-25T12:11:38.292601",
     "description": "",
     "id": "079d46db-32df-4b48-91f3-0a8bc8f69559",
     "image_url": "",
     "is_organization": true,
     "name": "no-organization",
     "state": "active",
     "title": "No Organization",
     "type": "organization"
   },
   "owner_org": "079d46db-32df-4b48-91f3-0a8bc8f69559",
   "private": false,
   "relationships_as_object": [],
   "relationships_as_subject": [],
-  "resources": [],
+  "resources": [
+    {
+      "cache_last_updated": null,
+      "cache_url": null,
+      "created": "2024-12-16T23:37:08",
+      "data": [
+        "dcterms:title",
+        "dcterms:accessRights",
+        "dcterms:creator",
+        "dcterms:description",
+        "dcterms:issued",
+        "dcterms:language",
+        "dcterms:identifier",
+        "dcat:theme",
+        "dcterms:type",
+        "dcat:keyword",
+        "dcat:landingPage",
+        "dcterms:hasVersion",
+        "dcterms:format",
+        "mls:task",
+        "datacite:isDescribedBy"
+      ],
+      "description": "The json representation of the dataset with its distributions based on DCAT.",
+      "format": "JSON",
+      "hash": "",
+      "id": "c32e471d-81a3-4104-bed4-1e2307e725cd",
+      "last_modified": "2024-12-16T23:01:00.543144",
+      "metadata_modified": "2024-12-16T23:01:00.553958",
+      "mimetype": "application/json",
+      "mimetype_inner": null,
+      "name": "Original Metadata",
+      "package_id": "71d36126-8423-4ebf-9843-08eb6c5c4b52",
+      "position": 0,
+      "resource_type": null,
+      "size": 982,
+      "state": "active",
+      "url":
+        resource/c32e471d-81a3-4104-bed4-1e2307e725cd/download/metadata.json",
+      "url_type": "upload"
+    }
+  ],
   "services_used_list": "",
   "state": "active",
   "tags": [
     {
       "display_name": "Hierarchical Structure",
       "id": "b637a6ae-ffc9-4c1c-8b81-1f3cb1571bc3",
       "name": "Hierarchical Structure",
       "state": "active",
       "vocabulary_id": null
     },
     {
       "display_name": "Multi-Agent Reinforcement Learning",
       "id": "4e2a71ae-0b76-48bf-b6bc-6e8310f2a7b4",
       "name": "Multi-Agent Reinforcement Learning",
       "state": "active",
       "vocabulary_id": null
     },
     {
       "display_name": "Reward Machines",
       "id": "94449a09-025f-42b0-872b-a6eabc799203",
       "name": "Reward Machines",
       "state": "active",
       "vocabulary_id": null
     }
   ],
   "title": "Multi-Agent Reinforcement Learning with a Hierarchy of Reward Machines",
   "type": "dataset",
   "version": ""
 }
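
For context, a field update like the doi_status and doi_date_published change above can be applied to a CKAN-based portal through the standard action API. The following is a minimal sketch, assuming the stock package_patch action is reachable at the usual /api/3/action/ path on https://service.tib.eu/ldmservice and that an admin API token is supplied via a placeholder CKAN_API_TOKEN environment variable; on this portal the DOI fields may in practice be set by its DOI registration workflow rather than patched by hand.

import os
import requests

# Hypothetical sketch: mark the DOI as registered on a CKAN-based portal
# using the standard package_patch action. URL and token handling are
# assumptions, not values taken from the change record above.
CKAN_URL = "https://service.tib.eu/ldmservice"
API_TOKEN = os.environ["CKAN_API_TOKEN"]  # placeholder; an admin token is required

response = requests.post(
    f"{CKAN_URL}/api/3/action/package_patch",
    json={
        "id": "71d36126-8423-4ebf-9843-08eb6c5c4b52",  # package id from the record above
        "doi_status": True,
        "doi_date_published": "2024-12-16",
    },
    headers={"Authorization": API_TOKEN},
)
response.raise_for_status()
result = response.json()["result"]  # the updated package dict returned by CKAN
print(result.get("doi_status"))

package_patch only overwrites the fields included in the request, which is why it is preferable here to package_update, which would replace the whole metadata record.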