Rev 2351 | Rev 2360 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2351 | Rev 2352 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright © 2010 Daniel Vetter |
2 | * Copyright © 2010 Daniel Vetter |
3 | * |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
13 | * Software. |
14 | * |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. |
21 | * IN THE SOFTWARE. |
22 | * |
22 | * |
23 | */ |
23 | */ |
24 | 24 | ||
25 | #include "drmP.h" |
25 | #include "drmP.h" |
26 | #include "drm.h" |
26 | #include "drm.h" |
27 | #include "i915_drm.h" |
27 | #include "i915_drm.h" |
28 | #include "i915_drv.h" |
28 | #include "i915_drv.h" |
29 | #include "i915_trace.h" |
29 | #include "i915_trace.h" |
30 | #include "intel_drv.h" |
30 | #include "intel_drv.h" |
31 | 31 | ||
32 | #define AGP_USER_TYPES (1 << 16) |
32 | #define AGP_USER_TYPES (1 << 16) |
33 | #define AGP_USER_MEMORY (AGP_USER_TYPES) |
33 | #define AGP_USER_MEMORY (AGP_USER_TYPES) |
34 | #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
34 | #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
35 | 35 | ||
36 | /* XXX kill agp_type! */ |
36 | /* XXX kill agp_type! */ |
37 | static unsigned int cache_level_to_agp_type(struct drm_device *dev, |
37 | static unsigned int cache_level_to_agp_type(struct drm_device *dev, |
38 | enum i915_cache_level cache_level) |
38 | enum i915_cache_level cache_level) |
39 | { |
39 | { |
40 | switch (cache_level) { |
40 | switch (cache_level) { |
41 | case I915_CACHE_LLC_MLC: |
41 | case I915_CACHE_LLC_MLC: |
42 | if (INTEL_INFO(dev)->gen >= 6) |
42 | if (INTEL_INFO(dev)->gen >= 6) |
43 | return AGP_USER_CACHED_MEMORY_LLC_MLC; |
43 | return AGP_USER_CACHED_MEMORY_LLC_MLC; |
44 | /* Older chipsets do not have this extra level of CPU |
44 | /* Older chipsets do not have this extra level of CPU |
45 | * cacheing, so fallthrough and request the PTE simply |
45 | * cacheing, so fallthrough and request the PTE simply |
46 | * as cached. |
46 | * as cached. |
47 | */ |
47 | */ |
48 | case I915_CACHE_LLC: |
48 | case I915_CACHE_LLC: |
49 | return AGP_USER_CACHED_MEMORY; |
49 | return AGP_USER_CACHED_MEMORY; |
50 | default: |
50 | default: |
51 | case I915_CACHE_NONE: |
51 | case I915_CACHE_NONE: |
52 | return AGP_USER_MEMORY; |
52 | return AGP_USER_MEMORY; |
53 | } |
53 | } |
54 | } |
54 | } |
55 | 55 | ||
56 | static bool do_idling(struct drm_i915_private *dev_priv) |
56 | static bool do_idling(struct drm_i915_private *dev_priv) |
57 | { |
57 | { |
58 | bool ret = dev_priv->mm.interruptible; |
58 | bool ret = dev_priv->mm.interruptible; |
59 | 59 | ||
60 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { |
60 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { |
61 | dev_priv->mm.interruptible = false; |
61 | dev_priv->mm.interruptible = false; |
62 | if (i915_gpu_idle(dev_priv->dev)) { |
62 | if (i915_gpu_idle(dev_priv->dev)) { |
63 | DRM_ERROR("Couldn't idle GPU\n"); |
63 | DRM_ERROR("Couldn't idle GPU\n"); |
64 | /* Wait a bit, in hopes it avoids the hang */ |
64 | /* Wait a bit, in hopes it avoids the hang */ |
65 | udelay(10); |
65 | udelay(10); |
66 | } |
66 | } |
67 | } |
67 | } |
68 | 68 | ||
69 | return ret; |
69 | return ret; |
70 | } |
70 | } |
71 | 71 | ||
72 | static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) |
72 | static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) |
73 | { |
73 | { |
74 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) |
74 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) |
75 | dev_priv->mm.interruptible = interruptible; |
75 | dev_priv->mm.interruptible = interruptible; |
76 | } |
76 | } |
77 | 77 | ||
#if 0
/*
 * Re-establish the GTT mapping of every bound object (e.g. after resume):
 * scrub the driver's GTT aperture to scratch pages, then flush and rebind
 * each object on the gtt_list at its existing cache level.
 *
 * NOTE(port): currently compiled out in this port.
 */
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	unsigned int first_entry;
	unsigned int num_entries;

	/* First fill our portion of the GTT with scratch pages */
	first_entry = dev_priv->mm.gtt_start / PAGE_SIZE;
	num_entries = (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE;
	intel_gtt_clear_range(first_entry, num_entries);

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_rebind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}
#endif
96 | 96 | ||
97 | int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) |
97 | int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) |
98 | { |
98 | { |
99 | struct drm_device *dev = obj->base.dev; |
99 | struct drm_device *dev = obj->base.dev; |
100 | struct drm_i915_private *dev_priv = dev->dev_private; |
100 | struct drm_i915_private *dev_priv = dev->dev_private; |
101 | unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level); |
101 | unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level); |
102 | int ret; |
102 | int ret; |
103 | 103 | ||
104 | // if (dev_priv->mm.gtt->needs_dmar) { |
104 | // if (dev_priv->mm.gtt->needs_dmar) { |
105 | // ret = intel_gtt_map_memory(obj->pages, |
105 | // ret = intel_gtt_map_memory(obj->pages, |
106 | // obj->base.size >> PAGE_SHIFT, |
106 | // obj->base.size >> PAGE_SHIFT, |
107 | // &obj->sg_list, |
107 | // &obj->sg_list, |
108 | // &obj->num_sg); |
108 | // &obj->num_sg); |
109 | // if (ret != 0) |
109 | // if (ret != 0) |
110 | // return ret; |
110 | // return ret; |
111 | 111 | ||
112 | // intel_gtt_insert_sg_entries(obj->sg_list, |
112 | // intel_gtt_insert_sg_entries(obj->sg_list, |
113 | // obj->num_sg, |
113 | // obj->num_sg, |
114 | // obj->gtt_space->start >> PAGE_SHIFT, |
114 | // obj->gtt_space->start >> PAGE_SHIFT, |
115 | // agp_type); |
115 | // agp_type); |
116 | // } else |
116 | // } else |
117 | intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, |
117 | intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, |
118 | obj->base.size >> PAGE_SHIFT, |
118 | obj->base.size >> PAGE_SHIFT, |
119 | obj->pages, |
119 | obj->pages, |
120 | agp_type); |
120 | agp_type); |
121 | 121 | ||
122 | return 0; |
122 | return 0; |
123 | } |
123 | } |
124 | - | ||
125 | #if 0 |
124 | |
126 | void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, |
125 | void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, |
127 | enum i915_cache_level cache_level) |
126 | enum i915_cache_level cache_level) |
128 | { |
127 | { |
129 | struct drm_device *dev = obj->base.dev; |
128 | struct drm_device *dev = obj->base.dev; |
130 | struct drm_i915_private *dev_priv = dev->dev_private; |
129 | struct drm_i915_private *dev_priv = dev->dev_private; |
131 | unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); |
130 | unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); |
132 | 131 | ||
133 | if (dev_priv->mm.gtt->needs_dmar) { |
132 | // if (dev_priv->mm.gtt->needs_dmar) { |
134 | BUG_ON(!obj->sg_list); |
133 | // BUG_ON(!obj->sg_list); |
135 | 134 | ||
136 | intel_gtt_insert_sg_entries(obj->sg_list, |
135 | // intel_gtt_insert_sg_entries(obj->sg_list, |
137 | obj->num_sg, |
136 | // obj->num_sg, |
138 | obj->gtt_space->start >> PAGE_SHIFT, |
137 | // obj->gtt_space->start >> PAGE_SHIFT, |
139 | agp_type); |
138 | // agp_type); |
140 | } else |
139 | // } else |
141 | intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, |
140 | intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, |
142 | obj->base.size >> PAGE_SHIFT, |
141 | obj->base.size >> PAGE_SHIFT, |
143 | obj->pages, |
142 | obj->pages, |
144 | agp_type); |
143 | agp_type); |
145 | } |
144 | } |
146 | - | ||
147 | #endif |
145 | |
148 | 146 | ||
149 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
147 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
150 | { |
148 | { |
151 | struct drm_device *dev = obj->base.dev; |
149 | struct drm_device *dev = obj->base.dev; |
152 | struct drm_i915_private *dev_priv = dev->dev_private; |
150 | struct drm_i915_private *dev_priv = dev->dev_private; |
153 | bool interruptible; |
151 | bool interruptible; |
154 | 152 | ||
155 | interruptible = do_idling(dev_priv); |
153 | interruptible = do_idling(dev_priv); |
156 | 154 | ||
157 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, |
155 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, |
158 | obj->base.size >> PAGE_SHIFT); |
156 | obj->base.size >> PAGE_SHIFT); |
159 | 157 | ||
160 | if (obj->sg_list) { |
158 | if (obj->sg_list) { |
161 | // intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); |
159 | // intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); |
162 | obj->sg_list = NULL; |
160 | obj->sg_list = NULL; |
163 | } |
161 | } |
164 | 162 | ||
165 | undo_idling(dev_priv, interruptible); |
163 | undo_idling(dev_priv, interruptible); |
166 | }><> |
164 | }><> |