Rev 6660 | Rev 6937
@@ -54,13 +54,8 @@
 unsigned long vm_mmap(struct file *file, unsigned long addr,
                       unsigned long len, unsigned long prot,
                       unsigned long flag, unsigned long offset);
-
-
-#define MAX_ERRNO 4095
-
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
-#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static void
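The defines removed above were local stand-ins for kernel helpers: they reproduce the standard Linux convention of encoding a negative errno in the top 4095 values of an address-sized return, which callers of vm_mmap() rely on. A minimal user-space sketch of that convention (not the driver's code; unlikely() is omitted here):

    #include <stdio.h>

    #define MAX_ERRNO       4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        unsigned long ok  = 0x100000;          /* plausible mapped address */
        unsigned long err = (unsigned long)-12; /* -ENOMEM folded into the return */

        printf("%lx -> %s\n", ok,  IS_ERR_VALUE(ok)  ? "error" : "address");
        printf("%lx -> %s\n", err, IS_ERR_VALUE(err) ? "error" : "address");
        return 0;
    }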
@@ -1971,10 +1966,13 @@
 			sg_set_page(sg, page, PAGE_SIZE, 0);
 		} else {
 			sg->length += PAGE_SIZE;
 		}
 		last_pfn = page_to_pfn(page);
+
+		/* Check that the i965g/gm workaround works. */
+		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
 	}
 #ifdef CONFIG_SWIOTLB
 	if (!swiotlb_nr_tbl())
 #endif
 		sg_mark_end(sg);
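The added WARN_ON guards the i965g/gm workaround: pages allocated with __GFP_DMA32 must sit below 4 GiB, and with 4 KiB pages the first page frame number at or above 4 GiB is (1 << 32) >> 12 = 0x00100000. A stand-alone check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
        unsigned int page_shift = 12;   /* 4 KiB pages */
        unsigned long long first_pfn_above_4g = (1ULL << 32) >> page_shift;

        assert(first_pfn_above_4g == 0x00100000ULL);
        return 0;
    }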
@@ -2437,8 +2435,10 @@
 }
 
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 					struct intel_engine_cs *ring)
 {
+	struct intel_ringbuffer *buffer;
+
 	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj;
 
@@ -2452,18 +2452,16 @@
 	/*
 	 * Clear the execlists queue up before freeing the requests, as those
 	 * are the ones that keep the context and ringbuffer backing objects
 	 * pinned in place.
 	 */
-	while (!list_empty(&ring->execlist_queue)) {
-		struct drm_i915_gem_request *submit_req;
-
-		submit_req = list_first_entry(&ring->execlist_queue,
-					      struct drm_i915_gem_request,
-					      execlist_link);
-		list_del(&submit_req->execlist_link);
-
-		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(submit_req);
-
-		i915_gem_request_unreference(submit_req);
-	}
+
+	if (i915.enable_execlists) {
+		spin_lock_irq(&ring->execlist_lock);
+
+		/* list_splice_tail_init checks for empty lists */
+		list_splice_tail_init(&ring->execlist_queue,
+				      &ring->execlist_retired_req_list);
+
+		spin_unlock_irq(&ring->execlist_lock);
+		intel_execlists_retire_requests(ring);
+	}
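The rewrite replaces the unpin-one-request-at-a-time loop with a single O(1) splice under execlist_lock, leaving the actual cleanup to intel_execlists_retire_requests(). A user-space approximation of the <linux/list.h> calls involved, showing that list_splice_tail_init() moves everything and leaves the source list empty (the "checks for empty lists" remark in the new comment):

    #include <assert.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_splice_tail_init(struct list_head *src, struct list_head *dst)
    {
        if (list_empty(src))
            return;                       /* the empty-list check */
        src->next->prev = dst->prev;      /* stitch src's nodes before dst's head */
        dst->prev->next = src->next;
        src->prev->next = dst;
        dst->prev = src->prev;
        INIT_LIST_HEAD(src);              /* source list is left reusable */
    }

    int main(void)
    {
        struct list_head queue, retired, req;

        INIT_LIST_HEAD(&queue);
        INIT_LIST_HEAD(&retired);
        list_add_tail(&req, &queue);

        list_splice_tail_init(&queue, &retired);
        assert(list_empty(&queue) && !list_empty(&retired));
        return 0;
    }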
@@ -2482,8 +2480,20 @@
 					   struct drm_i915_gem_request,
 					   list);
 
 		i915_gem_request_retire(request);
 	}
+
+	/* Having flushed all requests from all queues, we know that all
+	 * ringbuffers must now be empty. However, since we do not reclaim
+	 * all space when retiring the request (to prevent HEADs colliding
+	 * with rapid ringbuffer wraparound) the amount of available space
+	 * upon reset is less than when we start. Do one more pass over
+	 * all the ringbuffers to reset last_retired_head.
+	 */
+	list_for_each_entry(buffer, &ring->buffers, link) {
+		buffer->last_retired_head = buffer->tail;
+		intel_ring_update_space(buffer);
+	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
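The added pass resets each ringbuffer's last_retired_head to its tail so the free-space bookkeeping reports a fully drained ring after reset. A generic ring-buffer model of why a retired head equal to the tail means "empty" (this is an illustrative model, not intel_ring_update_space() itself):

    #include <assert.h>

    static int ring_space(int head, int tail, int size)
    {
        int space = head - tail;
        if (space <= 0)
            space += size;
        return space - 1;   /* one slot kept open to tell full from empty */
    }

    int main(void)
    {
        /* after the reset pass: head == tail, the whole ring (minus one
         * reserved slot) is available again */
        assert(ring_space(64, 64, 4096) == 4095);
        return 0;
    }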
@@ -2582,10 +2592,10 @@
 
 		intel_execlists_retire_requests(ring);
 	}
 }
 
-	if (idle)
-		mod_delayed_work(dev_priv->wq,
-				 &dev_priv->mm.idle_work,
-				 msecs_to_jiffies(100));
+//	if (idle)
+//		mod_delayed_work(dev_priv->wq,
+//			   &dev_priv->mm.idle_work,
+//			   msecs_to_jiffies(100));
 
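This revision comments out re-arming the idle work item, which upstream pushes out by roughly 100 ms via mod_delayed_work(). For reference, what msecs_to_jiffies(100) amounts to at a couple of HZ settings (plain arithmetic, ignoring the rounding-up the kernel implementation performs):

    #include <assert.h>

    static unsigned long msecs_to_jiffies_approx(unsigned int ms, unsigned int hz)
    {
        return (unsigned long)ms * hz / 1000;
    }

    int main(void)
    {
        assert(msecs_to_jiffies_approx(100, 1000) == 100);
        assert(msecs_to_jiffies_approx(100, 250) == 25);
        return 0;
    }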
@@ -2622,7 +2632,11 @@
 
 	for_each_ring(ring, dev_priv, i)
 		if (!list_empty(&ring->request_list))
 			return;
 
+	/* we probably should sync with hangcheck here, using cancel_work_sync.
+	 * Also locking seems to be fubar here, ring->request_list is protected
+	 * by dev->struct_mutex. */
+
 	intel_mark_idle(dev);
 
@@ -2746,9 +2760,9 @@
 
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
 			ret = __i915_wait_request(req[i], reset_counter, true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-						  file->driver_priv);
+						  to_rps_client(file));
 		i915_gem_request_unreference__unlocked(req[i]);
 	}
 	return ret;
@@ -3112,10 +3126,10 @@
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 	end = vm->total;
 	if (flags & PIN_MAPPABLE)
 		end = min_t(u64, end, dev_priv->gtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
-		end = min_t(u64, end, (1ULL << 32));
+		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
 	if (alignment == 0)
 		alignment = flags & PIN_MAPPABLE ? fence_alignment :
 						unfenced_alignment;
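The PIN_ZONE_4G clamp now stops one page short of the 4 GiB boundary, so no node can end flush against it. With 4 KiB pages the numbers work out as follows (stand-alone arithmetic check):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL

    int main(void)
    {
        uint64_t old_end = 1ULL << 32;
        uint64_t new_end = (1ULL << 32) - PAGE_SIZE;

        assert(new_end == 0xFFFFF000ULL);       /* one page below the boundary */
        assert(old_end - new_end == PAGE_SIZE);
        return 0;
    }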
@@ -3149,8 +3163,22 @@
 			  i915_gem_obj_lookup_or_create_vma(obj, vm);
 
 	if (IS_ERR(vma))
 		goto err_unpin;
 
+	if (flags & PIN_OFFSET_FIXED) {
+		uint64_t offset = flags & PIN_OFFSET_MASK;
+
+		if (offset & (alignment - 1) || offset + size > end) {
+			ret = -EINVAL;
+			goto err_free_vma;
+		}
+		vma->node.start = offset;
+		vma->node.size = size;
+		vma->node.color = obj->cache_level;
+		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+		if (ret)
+			goto err_free_vma;
+	} else {
 	if (flags & PIN_HIGH) {
 		search_flag = DRM_MM_SEARCH_BELOW;
 		alloc_flag = DRM_MM_CREATE_TOP;
@@ -3168,8 +3196,9 @@
 					  alloc_flag);
 	if (ret) {
 
 		goto err_free_vma;
 	}
+	}
 	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
 		ret = -EINVAL;
 		goto err_remove_node;
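The new PIN_OFFSET_FIXED path validates the caller-supplied offset before reserving the node with drm_mm_reserve_node(): the offset must be aligned and the node must fit below end. The alignment test relies on alignment being a power of two, where offset & (alignment - 1) isolates the misaligned low bits:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t alignment = 4096;  /* power of two, as the test assumes */

        assert((8192 & (alignment - 1)) == 0);  /* aligned: accepted */
        assert((8200 & (alignment - 1)) != 0);  /* misaligned: -EINVAL */
        return 0;
    }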
@@ -3520,10 +3549,10 @@
 		 * Due to a HW issue on BXT A stepping, GPU stores via a
 		 * snooped mapping may leave stale data in a corresponding CPU
 		 * cacheline, whereas normally such cachelines would get
 		 * invalidated.
 		 */
-		if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 			return -ENODEV;
 
 		level = I915_CACHE_LLC;
 		break;
@@ -3563,19 +3592,13 @@
  * any flushes to be pipelined (for pageflips).
  */
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_engine_cs *pipelined,
-				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
-	if (ret)
-		return ret;
-
 	/* Mark the pin_display early so that we account for the
 	 * display coherency whilst setting up the cache domains.
 	 */
@@ -3763,7 +3786,11 @@
 
 	if (flags & PIN_OFFSET_BIAS &&
 	    vma->node.start < (flags & PIN_OFFSET_MASK))
 		return true;
 
+	if (flags & PIN_OFFSET_FIXED &&
+	    vma->node.start != (flags & PIN_OFFSET_MASK))
+		return true;
+
 	return false;
 }
@@ -4028,7 +4055,8 @@
 
 	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
@@ -4161,13 +4189,11 @@
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
+		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
+		    vma->vm == vm)
 			return vma;
 	}
 	return NULL;
 }
@@ -4255,10 +4281,9 @@
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
 	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
 	int i, ret;
 
 	if (!HAS_L3_DPF(dev) || !remap_info)
@@ -4271,12 +4296,12 @@
 	/*
 	 * Note: We do not worry about the concurrent register cacheline hang
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, reg_base + i);
-		intel_ring_emit(ring, remap_info[i/4]);
+		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+		intel_ring_emit(ring, remap_info[i]);
 	}
 
 	intel_ring_advance(ring);
@@ -4442,19 +4467,10 @@
 
	/* We can't enable contexts until all firmware is loaded */
	if (HAS_GUC_UCODE(dev)) {
		ret = intel_guc_ucode_load(dev);
		if (ret) {
-			/*
-			 * If we got an error and GuC submission is enabled, map
-			 * the error to -EIO so the GPU will be declared wedged.
-			 * OTOH, if we didn't intend to use the GuC anyway, just
-			 * discard the error and carry on.
-			 */
-			DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
-				  i915.enable_guc_submission ? "" :
-				  " (ignored)");
-			ret = i915.enable_guc_submission ? -EIO : 0;
-			if (ret)
+			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
+			ret = -EIO;
 			goto out;
 		}
 	}
@@ -4516,16 +4532,8 @@
 	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
 			i915.enable_execlists);
 
 	mutex_lock(&dev->struct_mutex);
-
-	if (IS_VALLEYVIEW(dev)) {
-		/* VLVA0 (potential hack), BIOS isn't actually waking us */
-		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
-		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
-			      VLV_GTLC_ALLOWWAKEACK), 10))
-			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
-	}
 
 	if (!i915.enable_execlists) {
 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
 		dev_priv->gt.init_rings = i915_gem_init_rings;
@@ -4617,9 +4625,9 @@
 			  i915_gem_idle_work_handler);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
 		dev_priv->num_fence_regs = 32;
 	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 		dev_priv->num_fence_regs = 16;
@@ -4835,8 +4843,23 @@
 		return true;
 
 	return false;
 }
 
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
+{
+	struct page *page;
+
+	/* Only default objects have per-page dirty tracking */
+	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+		return NULL;
+
+	page = i915_gem_object_get_page(obj, n);
+	set_page_dirty(page);
+	return page;
+}
+
 /* Allocate a new GEM object and fill it with the supplied data */
 struct drm_i915_gem_object *
 i915_gem_object_create_from_data(struct drm_device *dev,
@@ -4860,8 +4883,9 @@
 		goto fail;
 
 	i915_gem_object_pin_pages(obj);
 	sg = obj->pages;
 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+	obj->dirty = 1;		/* Backing store is now out of date */
 	i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(bytes != size)) {