Subversion Repositories Kolibri OS

Rev

Rev 5078 | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 5078 Rev 6296
1
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
31
#include "vmwgfx_drv.h"
/* NOTE(review): the diff-viewer scrape stripped the <...> include targets;
 * restored from the upstream vmwgfx driver -- verify against the build. */
#include <drm/drmP.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
38
 
38
 
39
struct vmwgfx_gmrid_man {
39
struct vmwgfx_gmrid_man {
40
	spinlock_t lock;
40
	spinlock_t lock;
41
	struct ida gmr_ida;
41
	struct ida gmr_ida;
42
	uint32_t max_gmr_ids;
42
	uint32_t max_gmr_ids;
43
	uint32_t max_gmr_pages;
43
	uint32_t max_gmr_pages;
44
	uint32_t used_gmr_pages;
44
	uint32_t used_gmr_pages;
45
};
45
};
46
 
46
 
47
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
47
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
48
				  struct ttm_buffer_object *bo,
48
				  struct ttm_buffer_object *bo,
49
				  struct ttm_placement *placement,
49
				  const struct ttm_place *place,
50
				  uint32_t flags,
-
 
51
				  struct ttm_mem_reg *mem)
50
				  struct ttm_mem_reg *mem)
52
{
51
{
53
	struct vmwgfx_gmrid_man *gman =
52
	struct vmwgfx_gmrid_man *gman =
54
		(struct vmwgfx_gmrid_man *)man->priv;
53
		(struct vmwgfx_gmrid_man *)man->priv;
55
	int ret = 0;
54
	int ret = 0;
56
	int id;
55
	int id;
57
 
56
 
58
	mem->mm_node = NULL;
57
	mem->mm_node = NULL;
59
 
58
 
60
	spin_lock(&gman->lock);
59
	spin_lock(&gman->lock);
61
 
60
 
62
	if (gman->max_gmr_pages > 0) {
61
	if (gman->max_gmr_pages > 0) {
63
		gman->used_gmr_pages += bo->num_pages;
62
		gman->used_gmr_pages += bo->num_pages;
64
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
63
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
65
			goto out_err_locked;
64
			goto out_err_locked;
66
	}
65
	}
67
 
66
 
68
	do {
67
	do {
69
		spin_unlock(&gman->lock);
68
		spin_unlock(&gman->lock);
70
		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
69
		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
71
			ret = -ENOMEM;
70
			ret = -ENOMEM;
72
			goto out_err;
71
			goto out_err;
73
		}
72
		}
74
		spin_lock(&gman->lock);
73
		spin_lock(&gman->lock);
75
 
74
 
76
		ret = ida_get_new(&gman->gmr_ida, &id);
75
		ret = ida_get_new(&gman->gmr_ida, &id);
77
		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
76
		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
78
			ida_remove(&gman->gmr_ida, id);
77
			ida_remove(&gman->gmr_ida, id);
79
			ret = 0;
78
			ret = 0;
80
			goto out_err_locked;
79
			goto out_err_locked;
81
		}
80
		}
82
	} while (ret == -EAGAIN);
81
	} while (ret == -EAGAIN);
83
 
82
 
84
	if (likely(ret == 0)) {
83
	if (likely(ret == 0)) {
85
		mem->mm_node = gman;
84
		mem->mm_node = gman;
86
		mem->start = id;
85
		mem->start = id;
87
		mem->num_pages = bo->num_pages;
86
		mem->num_pages = bo->num_pages;
88
	} else
87
	} else
89
		goto out_err_locked;
88
		goto out_err_locked;
90
 
89
 
91
	spin_unlock(&gman->lock);
90
	spin_unlock(&gman->lock);
92
	return 0;
91
	return 0;
93
 
92
 
94
out_err:
93
out_err:
95
	spin_lock(&gman->lock);
94
	spin_lock(&gman->lock);
96
out_err_locked:
95
out_err_locked:
97
	gman->used_gmr_pages -= bo->num_pages;
96
	gman->used_gmr_pages -= bo->num_pages;
98
	spin_unlock(&gman->lock);
97
	spin_unlock(&gman->lock);
99
	return ret;
98
	return ret;
100
}
99
}
101
 
100
 
102
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
101
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
103
				   struct ttm_mem_reg *mem)
102
				   struct ttm_mem_reg *mem)
104
{
103
{
105
	struct vmwgfx_gmrid_man *gman =
104
	struct vmwgfx_gmrid_man *gman =
106
		(struct vmwgfx_gmrid_man *)man->priv;
105
		(struct vmwgfx_gmrid_man *)man->priv;
107
 
106
 
108
	if (mem->mm_node) {
107
	if (mem->mm_node) {
109
		spin_lock(&gman->lock);
108
		spin_lock(&gman->lock);
110
		ida_remove(&gman->gmr_ida, mem->start);
109
		ida_remove(&gman->gmr_ida, mem->start);
111
		gman->used_gmr_pages -= mem->num_pages;
110
		gman->used_gmr_pages -= mem->num_pages;
112
		spin_unlock(&gman->lock);
111
		spin_unlock(&gman->lock);
113
		mem->mm_node = NULL;
112
		mem->mm_node = NULL;
114
	}
113
	}
115
}
114
}
116
 
115
 
117
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
116
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
118
			      unsigned long p_size)
117
			      unsigned long p_size)
119
{
118
{
120
	struct vmw_private *dev_priv =
119
	struct vmw_private *dev_priv =
121
		container_of(man->bdev, struct vmw_private, bdev);
120
		container_of(man->bdev, struct vmw_private, bdev);
122
	struct vmwgfx_gmrid_man *gman =
121
	struct vmwgfx_gmrid_man *gman =
123
		kzalloc(sizeof(*gman), GFP_KERNEL);
122
		kzalloc(sizeof(*gman), GFP_KERNEL);
124
 
123
 
125
	if (unlikely(gman == NULL))
124
	if (unlikely(gman == NULL))
126
		return -ENOMEM;
125
		return -ENOMEM;
127
 
126
 
128
	spin_lock_init(&gman->lock);
127
	spin_lock_init(&gman->lock);
129
	gman->used_gmr_pages = 0;
128
	gman->used_gmr_pages = 0;
130
	ida_init(&gman->gmr_ida);
129
	ida_init(&gman->gmr_ida);
131
 
130
 
132
	switch (p_size) {
131
	switch (p_size) {
133
	case VMW_PL_GMR:
132
	case VMW_PL_GMR:
134
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
133
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
135
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
134
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
136
		break;
135
		break;
137
	case VMW_PL_MOB:
136
	case VMW_PL_MOB:
138
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
137
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
139
		gman->max_gmr_pages = dev_priv->max_mob_pages;
138
		gman->max_gmr_pages = dev_priv->max_mob_pages;
140
		break;
139
		break;
141
	default:
140
	default:
142
		BUG();
141
		BUG();
143
	}
142
	}
144
	man->priv = (void *) gman;
143
	man->priv = (void *) gman;
145
	return 0;
144
	return 0;
146
}
145
}
147
 
146
 
148
static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
147
static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
149
{
148
{
150
	struct vmwgfx_gmrid_man *gman =
149
	struct vmwgfx_gmrid_man *gman =
151
		(struct vmwgfx_gmrid_man *)man->priv;
150
		(struct vmwgfx_gmrid_man *)man->priv;
152
 
151
 
153
	if (gman) {
152
	if (gman) {
154
		ida_destroy(&gman->gmr_ida);
153
		ida_destroy(&gman->gmr_ida);
155
		kfree(gman);
154
		kfree(gman);
156
	}
155
	}
157
	return 0;
156
	return 0;
158
}
157
}
159
 
158
 
160
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
159
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
161
				const char *prefix)
160
				const char *prefix)
162
{
161
{
163
	printk(KERN_INFO "%s: No debug info available for the GMR "
162
	printk(KERN_INFO "%s: No debug info available for the GMR "
164
	       "id manager.\n", prefix);
163
	       "id manager.\n", prefix);
165
}
164
}
166
 
165
 
167
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
166
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
168
	vmw_gmrid_man_init,
167
	vmw_gmrid_man_init,
169
	vmw_gmrid_man_takedown,
168
	vmw_gmrid_man_takedown,
170
	vmw_gmrid_man_get_node,
169
	vmw_gmrid_man_get_node,
171
	vmw_gmrid_man_put_node,
170
	vmw_gmrid_man_put_node,
172
	vmw_gmrid_man_debug
171
	vmw_gmrid_man_debug
173
};
172
};