Rev 4080 | Rev 4569 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4080 | Rev 4111 | ||
---|---|---|---|
1 | /************************************************************************** |
1 | /************************************************************************** |
2 | * |
2 | * |
3 | * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA |
3 | * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. |
4 | * All Rights Reserved. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the |
7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including |
8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: |
12 | * the following conditions: |
13 | * |
13 | * |
14 | * The above copyright notice and this permission notice (including the |
14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions |
15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. |
16 | * of the Software. |
17 | * |
17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * |
25 | * |
26 | **************************************************************************/ |
26 | **************************************************************************/ |
27 | #define mb() asm volatile("mfence" : : : "memory") |
27 | #define mb() asm volatile("mfence" : : : "memory") |
28 | #define rmb() asm volatile("lfence" : : : "memory") |
28 | #define rmb() asm volatile("lfence" : : : "memory") |
29 | #define wmb() asm volatile("sfence" : : : "memory") |
29 | #define wmb() asm volatile("sfence" : : : "memory") |
30 | 30 | ||
31 | #include "vmwgfx_drv.h" |
31 | #include "vmwgfx_drv.h" |
32 | #include |
32 | #include |
33 | #include |
33 | #include |
34 | 34 | ||
- | 35 | #define VMW_PPN_SIZE (sizeof(unsigned long)) |
|
- | 36 | /* A future safe maximum remap size. */ |
|
35 | #define VMW_PPN_SIZE sizeof(unsigned long) |
37 | #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE) |
36 | 38 | ||
37 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, |
39 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, |
38 | struct page *pages[], |
40 | struct page *pages[], |
39 | unsigned long num_pages, |
41 | unsigned long num_pages, |
40 | int gmr_id) |
42 | int gmr_id) |
41 | { |
43 | { |
42 | SVGAFifoCmdDefineGMR2 define_cmd; |
44 | SVGAFifoCmdDefineGMR2 define_cmd; |
43 | SVGAFifoCmdRemapGMR2 remap_cmd; |
45 | SVGAFifoCmdRemapGMR2 remap_cmd; |
44 | uint32_t define_size = sizeof(define_cmd) + 4; |
- | |
45 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4; |
- | |
46 | uint32_t *cmd; |
46 | uint32_t *cmd; |
47 | uint32_t *cmd_orig; |
47 | uint32_t *cmd_orig; |
- | 48 | uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd); |
|
- | 49 | uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0); |
|
- | 50 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num; |
|
- | 51 | uint32_t remap_pos = 0; |
|
- | 52 | uint32_t cmd_size = define_size + remap_size; |
|
48 | uint32_t i; |
53 | uint32_t i; |
49 | 54 | ||
50 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size); |
55 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size); |
51 | if (unlikely(cmd == NULL)) |
56 | if (unlikely(cmd == NULL)) |
52 | return -ENOMEM; |
57 | return -ENOMEM; |
53 | 58 | ||
54 | define_cmd.gmrId = gmr_id; |
59 | define_cmd.gmrId = gmr_id; |
55 | define_cmd.numPages = num_pages; |
60 | define_cmd.numPages = num_pages; |
- | 61 | ||
- | 62 | *cmd++ = SVGA_CMD_DEFINE_GMR2; |
|
- | 63 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); |
|
- | 64 | cmd += sizeof(define_cmd) / sizeof(*cmd); |
|
- | 65 | ||
- | 66 | /* |
|
- | 67 | * Need to split the command if there are too many |
|
- | 68 | * pages that goes into the gmr. |
|
- | 69 | */ |
|
56 | 70 | ||
57 | remap_cmd.gmrId = gmr_id; |
71 | remap_cmd.gmrId = gmr_id; |
58 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? |
72 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? |
59 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; |
73 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; |
60 | remap_cmd.offsetPages = 0; |
- | |
61 | remap_cmd.numPages = num_pages; |
- | |
62 | 74 | ||
- | 75 | while (num_pages > 0) { |
|
- | 76 | unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP); |
|
63 | *cmd++ = SVGA_CMD_DEFINE_GMR2; |
77 | |
64 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); |
78 | remap_cmd.offsetPages = remap_pos; |
65 | cmd += sizeof(define_cmd) / sizeof(uint32); |
79 | remap_cmd.numPages = nr; |
66 | 80 | ||
67 | *cmd++ = SVGA_CMD_REMAP_GMR2; |
81 | *cmd++ = SVGA_CMD_REMAP_GMR2; |
68 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); |
82 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); |
69 | cmd += sizeof(remap_cmd) / sizeof(uint32); |
83 | cmd += sizeof(remap_cmd) / sizeof(*cmd); |
70 | 84 | ||
71 | for (i = 0; i < num_pages; ++i) { |
85 | for (i = 0; i < nr; ++i) { |
72 | if (VMW_PPN_SIZE <= 4) |
86 | if (VMW_PPN_SIZE <= 4) |
73 | *cmd = page_to_pfn(*pages++); |
87 | *cmd = page_to_pfn(*pages++); |
74 | else |
88 | else |
75 | *((uint64_t *)cmd) = page_to_pfn(*pages++); |
89 | *((uint64_t *)cmd) = page_to_pfn(*pages++); |
76 | 90 | ||
77 | cmd += VMW_PPN_SIZE / sizeof(*cmd); |
91 | cmd += VMW_PPN_SIZE / sizeof(*cmd); |
78 | } |
92 | } |
- | 93 | ||
- | 94 | num_pages -= nr; |
|
- | 95 | remap_pos += nr; |
|
- | 96 | } |
|
- | 97 | ||
- | 98 | BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd)); |
|
79 | 99 | ||
80 | vmw_fifo_commit(dev_priv, define_size + remap_size); |
100 | vmw_fifo_commit(dev_priv, cmd_size); |
81 | 101 | ||
82 | return 0; |
102 | return 0; |
83 | } |
103 | } |
84 | 104 | ||
85 | static void vmw_gmr2_unbind(struct vmw_private *dev_priv, |
105 | static void vmw_gmr2_unbind(struct vmw_private *dev_priv, |
86 | int gmr_id) |
106 | int gmr_id) |
87 | { |
107 | { |
88 | SVGAFifoCmdDefineGMR2 define_cmd; |
108 | SVGAFifoCmdDefineGMR2 define_cmd; |
89 | uint32_t define_size = sizeof(define_cmd) + 4; |
109 | uint32_t define_size = sizeof(define_cmd) + 4; |
90 | uint32_t *cmd; |
110 | uint32_t *cmd; |
91 | 111 | ||
92 | cmd = vmw_fifo_reserve(dev_priv, define_size); |
112 | cmd = vmw_fifo_reserve(dev_priv, define_size); |
93 | if (unlikely(cmd == NULL)) { |
113 | if (unlikely(cmd == NULL)) { |
94 | DRM_ERROR("GMR2 unbind failed.\n"); |
114 | DRM_ERROR("GMR2 unbind failed.\n"); |
95 | return; |
115 | return; |
96 | } |
116 | } |
97 | define_cmd.gmrId = gmr_id; |
117 | define_cmd.gmrId = gmr_id; |
98 | define_cmd.numPages = 0; |
118 | define_cmd.numPages = 0; |
99 | 119 | ||
100 | *cmd++ = SVGA_CMD_DEFINE_GMR2; |
120 | *cmd++ = SVGA_CMD_DEFINE_GMR2; |
101 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); |
121 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); |
102 | 122 | ||
103 | vmw_fifo_commit(dev_priv, define_size); |
123 | vmw_fifo_commit(dev_priv, define_size); |
104 | } |
124 | } |
105 | 125 | ||
106 | 126 | ||
107 | int vmw_gmr_bind(struct vmw_private *dev_priv, |
127 | int vmw_gmr_bind(struct vmw_private *dev_priv, |
108 | struct page *pages[], |
128 | struct page *pages[], |
109 | unsigned long num_pages, |
129 | unsigned long num_pages, |
110 | int gmr_id) |
130 | int gmr_id) |
111 | { |
131 | { |
112 | struct list_head desc_pages; |
132 | struct list_head desc_pages; |
113 | int ret; |
133 | int ret; |
114 | 134 | ||
115 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) |
135 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) |
116 | return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id); |
136 | return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id); |
117 | 137 | ||
118 | printf("%s epic fail\n",__FUNCTION__); |
138 | printf("%s epic fail\n",__FUNCTION__); |
119 | return -EINVAL; |
139 | return -EINVAL; |
120 | } |
140 | } |
121 | 141 | ||
122 | 142 | ||
/*
 * vmw_gmr_unbind - Unbind a GMR id.
 *
 * @dev_priv: Device private structure.
 * @gmr_id:   GMR id to unbind.
 *
 * GMR2-capable devices are handled through the FIFO; otherwise the
 * legacy GMR descriptor register is cleared directly under hw_mutex.
 */
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
		vmw_gmr2_unbind(dev_priv, gmr_id);
		return;
	}

	/* Legacy path: select the GMR, then clear its descriptor. */
	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();	/* ID write must reach the device before the descriptor write */
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();	/* full barrier: descriptor clear completes before unlock */
	mutex_unlock(&dev_priv->hw_mutex);
}
156 | }=>> |