/**
 * \file
 * \brief Buffer management for the bulk transfer library: mapping,
 *        unmapping, protection changes and capability assignment of
 *        bulk buffers.
 */

/*
 * Copyright (c) 2014 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>

#include <bulk_transfer/bulk_transfer.h>

#include "bulk_pool.h"
#include "bulk_buffer.h"

/**
 * Maps a buffer into the domain's vspace by filling the backing memobj
 * with the buffer's frame and faulting on it. This is a no-op on a fully
 * trusted channel.
 *
 * @param buffer    the buffer to map
 *
 * @return SYS_ERR_OK on success, error value on failure
 *
 * Note: The buffer's state and its backing capability must already be set
 *       in the buffer struct before calling this function.
 */
errval_t bulk_buffer_map(struct bulk_buffer *buffer)
{
    assert(buffer);

    errval_t err;
    struct bulk_pool_internal *pool_int;

    if (buffer->pool->trust == BULK_TRUST_FULL) {
        /* mapping in the trusted case is a no-op */
        return SYS_ERR_OK;
    }

    /* sanity check */
    if (buffer->state == BULK_BUFFER_INVALID || capref_is_null(buffer->cap)) {
        return BULK_TRANSFER_BUFFER_INVALID;
    }

    pool_int = (struct bulk_pool_internal *) buffer->pool;

    struct vregion *vregion = pool_int->vregion;
    struct memobj *memobj = vregion_get_memobj(vregion);

    size_t size = buffer->pool->buffer_size;
    size_t offset = size * buffer->bufferid;

    /* if we have never seen this buffer before, its address is not set yet */
    buffer->address = (void *) vspace_genvaddr_to_lvaddr(
                                        buffer->pool->base_address + offset);

    /* the capability was revoked, so we have to insert it into the memobj
     * again */
    err = memobj->f.fill(memobj, offset, buffer->cap, buffer->cap_offset);
    if (err_is_fail(err)) {
        /* TODO: error handling */
        debug_printf("offset = %p, base = %p\n", (void *) offset,
                     (void *) buffer->pool->base_address);
        return err_push(err, LIB_ERR_MEMOBJ_FILL);
    }

    /* a frame cap now backs the buffer, so we can perform the mapping */
    err = memobj->f.pagefault(memobj, vregion, offset, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
    }

    if (buffer->state != BULK_BUFFER_READ_WRITE) {
        err = memobj->f.protect(memobj, vregion, offset, size,
                                VREGION_FLAGS_READ);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_PROTECT);
        }
    }

    return SYS_ERR_OK;
}

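/*
 * Illustrative usage sketch (not part of the original file): mapping a
 * buffer of a non-trusted pool after its capability has been assigned.
 * Assumes `pool` is an initialized struct bulk_pool whose buffers array
 * has been populated.
 *
 *     struct bulk_buffer *buf = pool->buffers[0];
 *     errval_t err = bulk_buffer_map(buf);
 *     if (err_is_fail(err)) {
 *         USER_PANIC_ERR(err, "failed to map the bulk buffer");
 *     }
 *     // buf->address now points to the mapped buffer contents
 */
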
/**
 * Unmaps a single buffer according to the pool's trust level:
 * - if the channel is fully trusted, this is a no-op
 * - otherwise, the mapping is removed
 *
 * This function does not revoke or delete any capabilities.
 *
 * @param buffer    the buffer to unmap
 *
 * @return SYS_ERR_OK on success, error value on failure
 */
errval_t bulk_buffer_unmap(struct bulk_buffer *buffer)
{
    assert(buffer);
    assert(buffer->state == BULK_BUFFER_INVALID);

    errval_t err;
    struct bulk_pool_internal *pool_int;

    /* on a fully trusted channel, unmapping is a no-op */
    if (buffer->pool->trust == BULK_TRUST_FULL) {
        return SYS_ERR_OK;
    }

    pool_int = (struct bulk_pool_internal *) buffer->pool;

    struct vregion *vregion = pool_int->vregion;
    struct memobj *memobj = vregion_get_memobj(vregion);

    size_t offset = (lvaddr_t) buffer->address - buffer->pool->base_address;

    struct capref buf_cap;
    genvaddr_t ret_offset;

    /*
     * We have to remove the capability from the memory object; the other
     * side will revoke it anyway. This also unmaps the frame.
     */
    err = memobj->f.unfill(memobj, offset, &buf_cap, &ret_offset);
    if (err_is_fail(err)) {
        /* TODO: error handling */
        return err;
    }
    if (ret_offset != offset || buf_cap.slot != buffer->cap.slot) {
        /* the returned capability or offset does not match this buffer */
        /* TODO: error handling */
    }

    /* Note: the buffer's state is already BULK_BUFFER_INVALID at this point;
     * see the assert at the top of this function. */

    return SYS_ERR_OK;
}

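/*
 * Illustrative usage sketch (not part of the original file): callers
 * normally do not invoke bulk_buffer_unmap() directly; they go through
 * bulk_buffer_change_state() below, which sets the buffer state to
 * BULK_BUFFER_INVALID before the unmap (satisfying the assert above):
 *
 *     errval_t err = bulk_buffer_change_state(buf, BULK_BUFFER_INVALID);
 *     if (err_is_fail(err)) {
 *         // the capability could not be removed from the memobj
 *     }
 */
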
/**
 * Changes the protection bits of an already mapped buffer according to the
 * current buffer state.
 *
 * @param buffer    the buffer whose protection to change
 *
 * @return SYS_ERR_OK on success, error value on failure
 */
static errval_t bulk_buffer_remap(struct bulk_buffer *buffer)
{
    assert(buffer);
    assert(buffer->state != BULK_BUFFER_INVALID);
    errval_t err;

    if (buffer->pool->trust == BULK_TRUST_FULL) {
        return SYS_ERR_OK;
    }

    struct bulk_pool_internal *pool_int =
                    (struct bulk_pool_internal *) buffer->pool;

    struct vregion *vregion = pool_int->vregion;
    struct memobj *memobj = vregion_get_memobj(vregion);

    size_t offset = (lvaddr_t) buffer->address - buffer->pool->base_address;
    size_t size = buffer->pool->buffer_size;

    vs_prot_flags_t flags = VREGION_FLAGS_READ;
    if (buffer->state == BULK_BUFFER_READ_WRITE) {
        flags = VREGION_FLAGS_READ_WRITE;
    }
    err = memobj->f.protect(memobj, vregion, offset, size, flags);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_PROTECT);
    }

    return SYS_ERR_OK;
}

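/*
 * Summary of the protection applied by bulk_buffer_remap(), derived from
 * the code above:
 *
 *     BULK_BUFFER_READ_WRITE                       -> VREGION_FLAGS_READ_WRITE
 *     BULK_BUFFER_RO_OWNED, BULK_BUFFER_READ_ONLY  -> VREGION_FLAGS_READ
 */
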
/**
 * Checks if the buffer is owned by the calling domain.
 *
 * @param buffer    the buffer to check for ownership
 *
 * @return true if the calling domain owns the buffer, false otherwise
 */
uint8_t bulk_buffer_is_owner(struct bulk_buffer *buffer)
{
    return (buffer->state == BULK_BUFFER_RO_OWNED
                    || buffer->state == BULK_BUFFER_READ_WRITE);
}

/**
 * Checks if the buffer is a read-only copy.
 *
 * @param buffer    the buffer to check
 *
 * @return true     if the buffer is a read-only copy
 *         false    if the buffer is not a copy
 */
uint8_t bulk_buffer_is_copy(struct bulk_buffer *buffer)
{
    return (buffer->state == BULK_BUFFER_RO_OWNED
                    || buffer->state == BULK_BUFFER_READ_ONLY);
}

/**
 * Checks if the buffer is valid.
 *
 * @param buffer    the buffer to check
 *
 * @return true     if the buffer is valid
 *         false    if the buffer is not valid
 */
uint8_t bulk_buffer_is_valid(struct bulk_buffer *buffer)
{
    return (buffer->state != BULK_BUFFER_INVALID);
}

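/*
 * Illustrative usage sketch (not part of the original file): a typical
 * guard built from the predicates above before writing to a buffer. The
 * error code chosen here is hypothetical.
 *
 *     if (!bulk_buffer_is_valid(buf) || !bulk_buffer_is_owner(buf)
 *         || bulk_buffer_is_copy(buf)) {
 *         return BULK_TRANSFER_BUFFER_INVALID;  // hypothetical error choice
 *     }
 *     memset(buf->address, 0, buf->pool->buffer_size);
 */
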
/**
 * Changes the state of the buffer and adjusts the mappings accordingly.
 *
 * @param buffer       the buffer to change the state of
 * @param new_state    the new state to transition the buffer to
 *
 * @return SYS_ERR_OK on success, error value on failure
 */
errval_t bulk_buffer_change_state(struct bulk_buffer *buffer,
                                  enum bulk_buffer_state new_state)
{
    assert(buffer);

    errval_t err = SYS_ERR_OK;

    enum bulk_buffer_state st = buffer->state;

    if (st == new_state) {
        /* no change in state */
        return SYS_ERR_OK;
    }

    if (st == BULK_BUFFER_READ_WRITE) {
        buffer->state = new_state;
        switch (new_state) {
            case BULK_BUFFER_RO_OWNED:
            case BULK_BUFFER_READ_ONLY:
                err = bulk_buffer_remap(buffer);
                break;
            case BULK_BUFFER_INVALID:
                err = bulk_buffer_unmap(buffer);
                break;
            default:
                /* NO-OP */
                break;
        }
    } else if (bulk_buffer_is_read_only(buffer)) {
        buffer->state = new_state;
        switch (new_state) {
            case BULK_BUFFER_READ_WRITE:
                err = bulk_buffer_remap(buffer);
                break;
            case BULK_BUFFER_INVALID:
                err = bulk_buffer_unmap(buffer);
                break;
            default:
                /* NO-OP */
                break;
        }
    } else if (st == BULK_BUFFER_INVALID) {
        buffer->state = new_state;
        err = bulk_buffer_map(buffer);
    }

    if (err_is_fail(err)) {
        /* TODO: error handling */
        return err;
    }

    return SYS_ERR_OK;
}

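/*
 * Illustrative usage sketch (not part of the original file): downgrading a
 * buffer to a read-only copy before sharing it, then invalidating it once
 * it is no longer needed.
 *
 *     errval_t err = bulk_buffer_change_state(buf, BULK_BUFFER_RO_OWNED);
 *     if (err_is_fail(err)) {
 *         // remapping the buffer read-only failed
 *     }
 *     // ... hand out read-only copies to other domains ...
 *     err = bulk_buffer_change_state(buf, BULK_BUFFER_INVALID);
 */
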
/**
 * Sets a capability + offset pair for a buffer.
 *
 * @param buffer        the buffer
 * @param cap           capability to assign
 * @param cap_offset    offset of the buffer within the capability's frame
 *
 * @return SYS_ERR_OK on success, error from frame_identify() on failure
 */
errval_t bulk_buffer_assign_cap(struct bulk_buffer *buffer,
                                struct capref       cap,
                                size_t              cap_offset)
{
    errval_t err;
    struct frame_identity fid = { 0, 0 };

    buffer->cap = cap;
    buffer->cap_offset = cap_offset;

    /* derive the buffer's physical address from the frame identity */
    err = frame_identify(cap, &fid);
    buffer->phys = fid.base + cap_offset;

    return err;
}
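
/*
 * Illustrative usage sketch (not part of the original file): allocating a
 * fresh frame and assigning it to a buffer before mapping it. Assumes the
 * frame is at least buffer_size bytes and the buffer starts at offset 0.
 *
 *     struct capref frame;
 *     errval_t err = frame_alloc(&frame, buf->pool->buffer_size, NULL);
 *     if (err_is_ok(err)) {
 *         err = bulk_buffer_assign_cap(buf, frame, 0);
 *     }
 *     if (err_is_ok(err)) {
 *         err = bulk_buffer_map(buf);
 *     }
 */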