/*
 * Copyright (c) 2020, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <err.h>
#include <stdlib.h>

#include <kernel/vm.h>
#include <lib/trusty/handle.h>
#include <lib/trusty/memref.h>
#include <lib/trusty/trusty_app.h>

#include <uapi/mm.h>

#include <trace.h>

#include "util.h"

#define LOCAL_TRACE 0

/**
 * struct memref - kernel object backing a shareable memory reference
 * @slice:     &struct vmm_obj_slice which will back a mapping of the memref
 * @handle:    Handle that may be referenced and transferred between userspace
 *             processes.
 * @mmap_prot: Protections to be enforced on the slice beyond what its
 *             check_flags function forces. Should be a mask from the
 *             MMAP_PROT_ family of flags.
 */
struct memref {
    struct vmm_obj_slice slice;
    struct handle handle;
    uint32_t mmap_prot;
};

/* This is only safe to call when the handle is being destroyed */
static void memref_destroy(struct memref* memref) {
    LTRACEF("dropping memref\n");
    vmm_obj_slice_release(&memref->slice);
    free(memref);
}

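/* handle_ops destroy callback: recovers the owning memref and frees it */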
static void memref_handle_destroy(struct handle* memref_handle) {
    DEBUG_ASSERT(memref_handle);
    struct memref* memref = containerof(memref_handle, struct memref, handle);
    memref_destroy(memref);
}

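/*
 * handle_ops mmap callback. Validates that the requested window is
 * page-aligned and falls within the slice, checks the requested
 * protections against the memref's mmap_prot, then maps the backing
 * vmm_obj into the current app's address space.
 */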
static status_t memref_mmap(struct handle* handle,
                            size_t offset,
                            user_size_t size,
                            uint32_t mmap_prot,
                            user_addr_t* addr) {
    DEBUG_ASSERT(handle);
    DEBUG_ASSERT(addr);

    LTRACEF("entered\n");

    status_t rc;
    struct memref* memref = containerof(handle, struct memref, handle);
    uint arch_mmu_flags = 0;

    if (!IS_PAGE_ALIGNED(offset)) {
        LTRACEF("unaligned offset\n");
        return ERR_INVALID_ARGS;
    }

    if (!IS_PAGE_ALIGNED(size)) {
        LTRACEF("unaligned size\n");
        return ERR_INVALID_ARGS;
    }

    /* Reject windows that start past or extend beyond the slice. */
    if (offset > memref->slice.size) {
        LTRACEF("bad offset\n");
        return ERR_ACCESS_DENIED;
    }

    if (size > memref->slice.size - offset) {
        LTRACEF("bad size\n");
        return ERR_ACCESS_DENIED;
    }

    /*
     * Check the caller's requested protections against the memref's limit
     * and translate them to arch mmu flags.
     */
    rc = xlat_flags(memref->mmap_prot, mmap_prot, &arch_mmu_flags);
    if (rc) {
        LTRACEF("xlat_flags failed\n");
        return rc;
    }

    struct trusty_app* app = current_trusty_app();
    assert(app);

    void* vaddr = (void*)(vaddr_t)*addr;

    rc = vmm_alloc_obj(app->aspace, "memref", memref->slice.obj,
                       memref->slice.offset + offset, size, &vaddr, 0, 0,
                       arch_mmu_flags);
    if (rc) {
        LTRACEF("vmm_alloc_obj failed\n");
        return rc;
    }

    *addr = (user_addr_t)((uintptr_t)vaddr);

    LTRACEF("success\n");
    return NO_ERROR;
}

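/* Handle ops shared by all memref handles; also used to identify them */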
static struct handle_ops memref_handle_ops = {
        .destroy = memref_handle_destroy,
        .mmap = memref_mmap,
};

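/*
 * Allocates a zeroed memref with an initialized (empty) slice and a
 * handle holding a single reference. Returns NULL on allocation failure.
 */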
static struct memref* memref_create(uint32_t mmap_prot) {
    /* defensive zero; calloc should fully initialize the struct */
    struct memref* memref = calloc(1, sizeof(*memref));
    if (!memref) {
        return NULL;
    }

    vmm_obj_slice_init(&memref->slice);
    handle_init_etc(&memref->handle, &memref_handle_ops, 0);
    memref->mmap_prot = mmap_prot;
    return memref;
}

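/*
 * Verifies that a slice is page-aligned and that its backing vmm_obj
 * will accept the arch mmu flags implied by mmap_prot.
 */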
static status_t check_slice(struct vmm_obj_slice* slice, uint32_t mmap_prot) {
    if (!IS_PAGE_ALIGNED(slice->size) || !IS_PAGE_ALIGNED(slice->offset)) {
        LTRACEF("unaligned\n");
        return ERR_INVALID_ARGS;
    }

    /*
     * Pass mmap_prot as both the limit and the request so the most
     * permissive mapping this memref allows is validated up front.
     */
    uint arch_mmu_flags = 0;
    status_t rc = xlat_flags(mmap_prot, mmap_prot, &arch_mmu_flags);
    if (rc) {
        LTRACEF("xlat_flags failed\n");
        return rc;
    }
    rc = slice->obj->ops->check_flags(slice->obj, &arch_mmu_flags);
    if (rc) {
        LTRACEF("check_flags failed\n");
        return rc;
    }

    return NO_ERROR;
}

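/*
 * Creates a memref handle backed by an explicit vmm_obj range. On
 * success, the new handle (owning one reference) is returned via @handle.
 */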
status_t memref_create_from_vmm_obj(struct vmm_obj* obj,
                                    size_t offset,
                                    size_t size,
                                    uint32_t mmap_prot,
                                    struct handle** handle) {
    DEBUG_ASSERT(obj);
    DEBUG_ASSERT(handle);

    struct memref* memref = memref_create(mmap_prot);
    if (!memref) {
        return ERR_NO_MEMORY;
    }

    vmm_obj_slice_bind(&memref->slice, obj, offset, size);

    status_t rc = check_slice(&memref->slice, mmap_prot);
    if (rc) {
        goto err;
    }

    *handle = &memref->handle;

    return NO_ERROR;

err:
    handle_decref(&memref->handle);
    return rc;
}

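/*
 * Creates a memref handle from a range of an existing address space by
 * looking up the vmm_obj backing [vaddr, vaddr + size).
 */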
status_t memref_create_from_aspace(const vmm_aspace_t* aspace,
                                   vaddr_t vaddr,
                                   size_t size,
                                   uint32_t mmap_prot,
                                   struct handle** handle) {
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(handle);

    struct memref* memref = memref_create(mmap_prot);
    if (!memref) {
        return ERR_NO_MEMORY;
    }

    status_t rc = vmm_get_obj(aspace, vaddr, size, &memref->slice);
    if (rc) {
        LTRACEF("vmm_get_obj failed: %d\n", rc);
        goto err;
    }

    rc = check_slice(&memref->slice, mmap_prot);
    if (rc) {
        goto err;
    }

    *handle = &memref->handle;

    return NO_ERROR;

err:
    handle_decref(&memref->handle);
    return rc;
}

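/* A handle is a memref iff it uses memref_handle_ops */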
static bool handle_is_memref(struct handle* handle) {
    return handle->ops == &memref_handle_ops;
}

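/*
 * Returns the vmm_obj backing a memref handle, or NULL if the handle is
 * not a memref.
 */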
struct vmm_obj* memref_handle_to_vmm_obj(struct handle* handle) {
    if (handle_is_memref(handle)) {
        return containerof(handle, struct memref, handle)->slice.obj;
    } else {
        return NULL;
    }
}
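
/*
 * Usage sketch (illustrative, not part of this module): creating a memref
 * over a page-aligned region of the current app's address space and
 * dropping the creation reference once it has been handed off. The flag
 * name follows the MMAP_PROT_ family referenced above, and the handoff
 * step is left abstract since handle-table installation lives elsewhere.
 *
 *   struct handle* h;
 *   status_t rc = memref_create_from_aspace(current_trusty_app()->aspace,
 *                                           vaddr, size, MMAP_PROT_READ, &h);
 *   if (rc != NO_ERROR)
 *       return rc;
 *   // ...install h into the caller's handle table...
 *   handle_decref(h); // the installed entry holds its own reference
 */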