/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <stdio.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"

#include "r300_screen_buffer.h"

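/* Upload 'count' indices of size 'index_size', starting at index '*start',
 * from the user pointer 'ptr' into a GPU-visible upload buffer.  On return,
 * '*index_buffer' references the upload buffer and '*start' is rebased to
 * the offset of the uploaded data within that buffer. */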
void r300_upload_index_buffer(struct r300_context *r300,
                              struct pipe_resource **index_buffer,
                              unsigned index_size, unsigned *start,
                              unsigned count, const uint8_t *ptr)
{
    unsigned index_offset;

    *index_buffer = NULL;

    u_upload_data(r300->uploader,
                  0, count * index_size,
                  ptr + (*start * index_size),
                  &index_offset,
                  index_buffer);

    *start = index_offset / index_size;
}

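/* Release the storage backing a buffer resource: the malloc'd copy (if any),
 * the winsys buffer reference (if any), and the resource structure itself. */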
static void r300_buffer_destroy(struct pipe_screen *screen,
                                struct pipe_resource *buf)
{
    struct r300_resource *rbuf = r300_resource(buf);

    align_free(rbuf->malloced_buffer);

    if (rbuf->buf)
        pb_reference(&rbuf->buf, NULL);

    FREE(rbuf);
}

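/* Map a buffer for CPU access.  Buffers backed by a malloc'd copy are
 * returned directly; winsys buffers are mapped through the winsys, with a
 * couple of tricks to avoid stalling on the GPU (see below). */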
static void *
r300_buffer_transfer_map( struct pipe_context *context,
                          struct pipe_resource *resource,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer )
{
    struct r300_context *r300 = r300_context(context);
    struct radeon_winsys *rws = r300->screen->rws;
    struct r300_resource *rbuf = r300_resource(resource);
    struct pipe_transfer *transfer;
    uint8_t *map;

    transfer = util_slab_alloc(&r300->pool_transfers);
    transfer->resource = resource;
    transfer->level = level;
    transfer->usage = usage;
    transfer->box = *box;
    transfer->stride = 0;
    transfer->layer_stride = 0;

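    /* Buffers kept in system RAM (e.g. constant buffers) can be returned
     * directly without involving the winsys. */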
    if (rbuf->malloced_buffer) {
        *ptransfer = transfer;
        return rbuf->malloced_buffer + box->x;
    }

    if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
        !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        assert(usage & PIPE_TRANSFER_WRITE);

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->cs_buf, RADEON_USAGE_READWRITE) ||
            r300->rws->buffer_is_busy(rbuf->buf, RADEON_USAGE_READWRITE)) {
            unsigned i;
            struct pb_buffer *new_buf;

            /* Create a new one in the same pipe_resource. */
            new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.b.width0,
                                               R300_BUFFER_ALIGNMENT, TRUE,
                                               rbuf->domain, 0);
            if (new_buf) {
                /* Discard the old buffer. */
                pb_reference(&rbuf->buf, NULL);
                rbuf->buf = new_buf;
                rbuf->cs_buf = r300->rws->buffer_get_cs_handle(rbuf->buf);

                /* We changed the buffer, now we need to bind it where the old one was bound. */
                for (i = 0; i < r300->nr_vertex_buffers; i++) {
                    if (r300->vertex_buffer[i].buffer == &rbuf->b.b) {
                        r300->vertex_arrays_dirty = TRUE;
                        break;
                    }
                }
            }
        }
    }

    /* Buffers are never written by the GPU, therefore mapping for read can be
     * unsynchronized. */
    if (!(usage & PIPE_TRANSFER_WRITE)) {
       usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
    }

    map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);

    if (map == NULL) {
        util_slab_free(&r300->pool_transfers, transfer);
        return NULL;
    }

    *ptransfer = transfer;
    return map + box->x;
}

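/* Release the transfer object.  Note that the buffer mapping itself is not
 * explicitly unmapped here. */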
static void r300_buffer_transfer_unmap( struct pipe_context *pipe,
                                        struct pipe_transfer *transfer )
{
    struct r300_context *r300 = r300_context(pipe);

    util_slab_free(&r300->pool_transfers, transfer);
}

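/* Dispatch table used by the u_resource helpers for buffer resources. */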
static const struct u_resource_vtbl r300_buffer_vtbl =
{
   NULL,                               /* get_handle */
   r300_buffer_destroy,                /* resource_destroy */
   r300_buffer_transfer_map,           /* transfer_map */
   NULL,                               /* transfer_flush_region */
   r300_buffer_transfer_unmap,         /* transfer_unmap */
   NULL                                /* transfer_inline_write */
};

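/* Create a buffer resource.  Depending on the bind flags and hardware
 * capabilities, the storage is either a plain malloc'd block in system RAM
 * or a winsys buffer in the GTT domain. */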
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ)
{
    struct r300_screen *r300screen = r300_screen(screen);
    struct r300_resource *rbuf;

    rbuf = MALLOC_STRUCT(r300_resource);

    rbuf->b.b = *templ;
    rbuf->b.vtbl = &r300_buffer_vtbl;
    pipe_reference_init(&rbuf->b.b.reference, 1);
    rbuf->b.b.screen = screen;
    rbuf->domain = RADEON_DOMAIN_GTT;
    rbuf->buf = NULL;
    rbuf->malloced_buffer = NULL;

    /* Allocate constant buffers and SWTCL vertex and index buffers in RAM.
     * Note that uploaded index buffers use the flag PIPE_BIND_CUSTOM, so that
     * we can distinguish them from user-created buffers.
     */
    if (templ->bind & PIPE_BIND_CONSTANT_BUFFER ||
        (!r300screen->caps.has_tcl && !(templ->bind & PIPE_BIND_CUSTOM))) {
        rbuf->malloced_buffer = align_malloc(templ->width0, 64);
        return &rbuf->b.b;
    }

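    /* Everything else is backed by a winsys buffer in the GTT domain. */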
    rbuf->buf =
        r300screen->rws->buffer_create(r300screen->rws, rbuf->b.b.width0,
                                       R300_BUFFER_ALIGNMENT, TRUE,
                                       rbuf->domain, 0);
    if (!rbuf->buf) {
        FREE(rbuf);
        return NULL;
    }

    rbuf->cs_buf =
        r300screen->rws->buffer_get_cs_handle(rbuf->buf);

    return &rbuf->b.b;
}