radeon_gart.c: diff of Rev 3764 against Rev 5078
Line 26... | Line 26...
  * Jerome Glisse
  */
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
-#include "radeon_reg.h"
 
 
 static inline void *
Line 136... | Line 135...
 	int r;
 
 	if (rdev->gart.robj == NULL) {
 		r = radeon_bo_create(rdev, rdev->gart.table_size,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     NULL, &rdev->gart.robj);
+				     0, NULL, &rdev->gart.robj);
 		if (r) {
 			return r;
 		}
 	}
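The new 0 argument is a buffer-object creation flags parameter that radeon_bo_create() gained between these revisions. A sketch of the assumed updated prototype (taken to match the corresponding upstream kernel change of this era, not verified against this tree):

	/* Assumed signature: the added "flags" parameter carries RADEON_GEM_*
	 * creation flags; passing 0 requests the default behaviour. */
	int radeon_bo_create(struct radeon_device *rdev,
			     unsigned long size, int byte_align,
			     bool kernel, u32 domain, u32 flags,
			     struct sg_table *sg, struct radeon_bo **bo_ptr);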
Line 214... | Line 213...
 void radeon_gart_table_vram_free(struct radeon_device *rdev)
 {
 	if (rdev->gart.robj == NULL) {
 		return;
 	}
-	radeon_gart_table_vram_unpin(rdev);
 	radeon_bo_unref(&rdev->gart.robj);
 }
 
 /*
Line 252... | Line 250...
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
 				if (rdev->gart.ptr) {
-					radeon_gart_set_page(rdev, t, page_base);
+					radeon_gart_set_page(rdev, t, page_base,
+							     RADEON_GART_PAGE_DUMMY);
 				}
 				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
 	}
 }
Line 270... | Line 269...
  * @rdev: radeon_device pointer
  * @offset: offset into the GPU's gart aperture
  * @pages: number of pages to bind
  * @pagelist: pages to bind
  * @dma_addr: DMA addresses of pages
+ * @flags: RADEON_GART_PAGE_* flags
  *
  * Binds the requested pages to the gart page table
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, u32 *pagelist, dma_addr_t *dma_addr)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
+		     uint32_t flags)
 {
 	unsigned t;
 	unsigned p;
 	uint64_t page_base;
 	int i, j;
-
-	// dbgprintf("offset %x pages %d list %x\n",
-	//           offset, pages, pagelist);
+
 	if (!rdev->gart.ready) {
 		WARN(1, "trying to bind memory to uninitialized GART !\n");
 		return -EINVAL;
 	}
 	t = offset / RADEON_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
+		rdev->gart.pages_addr[p] = dma_addr[i];
 		rdev->gart.pages[p] = pagelist[i];
 		if (rdev->gart.ptr) {
 			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, page_base);
+				radeon_gart_set_page(rdev, t, page_base, flags);
 				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
 	}
 	mb();
 	radeon_gart_tlb_flush(rdev);
 	return 0;
 }
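To make the index bookkeeping above concrete, a standalone sketch of the same arithmetic (4 KiB CPU pages are an assumption here; radeon's GPU page size is 4 KiB, so the inner loop runs once per CPU page in this case, while a 64 KiB-page CPU would program 16 GPU entries per CPU page):

#include <stdio.h>

#define PAGE_SIZE            4096   /* CPU page size (an assumption) */
#define RADEON_GPU_PAGE_SIZE 4096   /* radeon's GPU page size */

int main(void)
{
	unsigned offset = 0x10000;   /* byte offset into the GART aperture */

	/* Same bookkeeping as radeon_gart_bind(): t indexes GPU page table
	 * entries, p indexes CPU-sized pages. */
	unsigned t = offset / RADEON_GPU_PAGE_SIZE;           /* 16 */
	unsigned p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);  /* 16 */

	printf("t=%u p=%u entries_per_cpu_page=%u\n",
	       t, p, PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	return 0;
}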
-
-/**
- * radeon_gart_restore - bind all pages in the gart page table
- *
- * @rdev: radeon_device pointer
- *
- * Binds all pages in the gart page table (all asics).
- * Used to rebuild the gart table on device startup or resume.
- */
-void radeon_gart_restore(struct radeon_device *rdev)
-{
-	int i, j, t;
-	u64 page_base;
-
-	if (!rdev->gart.ptr) {
-		return;
-	}
-	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
-		page_base = rdev->gart.pages_addr[i];
-		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-			radeon_gart_set_page(rdev, t, page_base);
-			page_base += RADEON_GPU_PAGE_SIZE;
-		}
-	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
-}
 
 /**
  * radeon_gart_init - init the driver info for managing the gart
  *
Line 400... | Line 372...
 	rdev->gart.ready = false;
 	vfree(rdev->gart.pages);
 	vfree(rdev->gart.pages_addr);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
-}
-
-/*
- * GPUVM
- * GPUVM is similar to the legacy gart on older asics; however,
- * rather than there being a single global gart table
- * for the entire GPU, there are multiple VM page tables active
- * at any given time. The VM page tables can contain a mix of
- * vram pages and system memory pages, and system memory pages
- * can be mapped as snooped (cached system pages) or unsnooped
- * (uncached system pages).
- * Each VM has an ID associated with it and there is a page table
- * associated with each VMID. When executing a command buffer,
- * the kernel tells the ring what VMID to use for that command
- * buffer. VMIDs are allocated dynamically as commands are submitted.
- * The userspace drivers maintain their own address space and the kernel
- * sets up their page tables accordingly when they submit their
- * command buffers and a VMID is assigned.
- * Cayman/Trinity support up to 8 active VMs at any given time;
- * SI supports 16.
- */
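To make the per-VM page table structure concrete, a small standalone sketch of how a GPU virtual address splits into a page directory slot and a page table slot (the constants are assumptions matching the driver of this era: 4 KiB GPU pages, 512 PTEs per page table):

#include <stdint.h>
#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE  4096
#define RADEON_VM_BLOCK_SIZE  9                       /* assumed era value */
#define RADEON_VM_PTE_COUNT   (1 << RADEON_VM_BLOCK_SIZE)

int main(void)
{
	uint64_t va  = 0x345678;                        /* GPU virtual address */
	uint64_t pfn = va / RADEON_GPU_PAGE_SIZE;       /* 0x345 */
	uint64_t pde = pfn >> RADEON_VM_BLOCK_SIZE;     /* page directory slot */
	uint64_t pte = pfn & (RADEON_VM_PTE_COUNT - 1); /* slot inside the PT  */

	printf("pfn=0x%llx pde=%llu pte=%llu\n",
	       (unsigned long long)pfn,
	       (unsigned long long)pde, (unsigned long long)pte);
	return 0;
}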
-
-/*
- * vm helpers
- *
- * TODO bind a default page at vm initialization for default address
- */
-
-/**
- * radeon_vm_num_pdes - return the number of page directory entries
- *
- * @rdev: radeon_device pointer
- *
- * Calculate the number of page directory entries (cayman+).
- */
-static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
-{
-	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
-}
-
-/**
- * radeon_vm_directory_size - returns the size of the page directory in bytes
- *
- * @rdev: radeon_device pointer
- *
- * Calculate the size of the page directory in bytes (cayman+).
- */
-static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
-{
-	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
-}
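A worked instance of the sizing math above (a sketch; RADEON_VM_BLOCK_SIZE = 9 and 8-byte entries are assumed era values):

#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_VM_BLOCK_SIZE 9    /* assumed: 512 PTEs per page table */

int main(void)
{
	/* e.g. a 1 GiB per-VM address space */
	unsigned max_pfn  = (1024u << 20) / RADEON_GPU_PAGE_SIZE; /* 262144 */
	unsigned num_pdes = max_pfn >> RADEON_VM_BLOCK_SIZE;      /* 512    */
	unsigned pd_bytes = num_pdes * 8;                         /* 4096   */

	printf("pfns=%u pdes=%u directory=%u bytes\n",
	       max_pfn, num_pdes, pd_bytes);
	return 0;
}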
-
-/**
- * radeon_vm_manager_init - init the vm manager
- *
- * @rdev: radeon_device pointer
- *
- * Init the vm manager (cayman+).
- * Returns 0 for success, error for failure.
- */
-int radeon_vm_manager_init(struct radeon_device *rdev)
-{
-	struct radeon_vm *vm;
-	struct radeon_bo_va *bo_va;
-	int r;
-	unsigned size;
-
-	if (!rdev->vm_manager.enabled) {
-		/* allocate enough for 2 full VM pts */
-		size = radeon_vm_directory_size(rdev);
-		size += rdev->vm_manager.max_pfn * 8;
-		size *= 2;
-		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-					      RADEON_GPU_PAGE_ALIGN(size),
-					      RADEON_GEM_DOMAIN_VRAM);
-		if (r) {
-			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
-				(rdev->vm_manager.max_pfn * 8) >> 10);
-			return r;
-		}
-
-		r = radeon_asic_vm_init(rdev);
-		if (r)
-			return r;
-
-		rdev->vm_manager.enabled = true;
-
-		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
-		if (r)
-			return r;
-	}
-
-	/* restore page table */
-	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
-		if (vm->page_directory == NULL)
-			continue;
-
-		list_for_each_entry(bo_va, &vm->va, vm_list) {
-			bo_va->valid = false;
-		}
-	}
-	return 0;
-}
-
-/**
- * radeon_vm_free_pt - free the page table for a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm to unbind
- *
- * Free the page table of a specific vm (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void radeon_vm_free_pt(struct radeon_device *rdev,
-			      struct radeon_vm *vm)
-{
-	struct radeon_bo_va *bo_va;
-	int i;
-
-	if (!vm->page_directory)
-		return;
-
-	list_del_init(&vm->list);
-	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-
-	list_for_each_entry(bo_va, &vm->va, vm_list) {
-		bo_va->valid = false;
-	}
-
-	if (vm->page_tables == NULL)
-		return;
-
-	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
-		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
-
-	kfree(vm->page_tables);
-}
-
-/**
- * radeon_vm_manager_fini - tear down the vm manager
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the VM manager (cayman+).
- */
-void radeon_vm_manager_fini(struct radeon_device *rdev)
-{
-	struct radeon_vm *vm, *tmp;
-	int i;
-
-	if (!rdev->vm_manager.enabled)
-		return;
-
-	mutex_lock(&rdev->vm_manager.lock);
-	/* free all allocated page tables */
-	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
-		mutex_lock(&vm->mutex);
-		radeon_vm_free_pt(rdev, vm);
-		mutex_unlock(&vm->mutex);
-	}
-	for (i = 0; i < RADEON_NUM_VM; ++i) {
-		radeon_fence_unref(&rdev->vm_manager.active[i]);
-	}
-	radeon_asic_vm_fini(rdev);
-	mutex_unlock(&rdev->vm_manager.lock);
-
-	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
-	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
-	rdev->vm_manager.enabled = false;
-}
-
-/**
- * radeon_vm_evict - evict page table to make room for new one
- *
- * @rdev: radeon_device pointer
- * @vm: VM we want to allocate something for
- *
- * Evict a VM from the lru, making sure that it isn't @vm (cayman+).
- * Returns 0 for success, -ENOMEM for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	struct radeon_vm *vm_evict;
-
-	if (list_empty(&rdev->vm_manager.lru_vm))
-		return -ENOMEM;
-
-	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
-				    struct radeon_vm, list);
-	if (vm_evict == vm)
-		return -ENOMEM;
-
-	mutex_lock(&vm_evict->mutex);
-	radeon_vm_free_pt(rdev, vm_evict);
-	mutex_unlock(&vm_evict->mutex);
-	return 0;
-}
-
-/**
- * radeon_vm_alloc_pt - allocates a page table for a VM
- *
- * @rdev: radeon_device pointer
- * @vm: vm to bind
- *
- * Allocate a page table for the requested vm (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	unsigned pd_size, pts_size;
-	u64 *pd_addr;
-	int r;
-
-	if (vm == NULL) {
-		return -EINVAL;
-	}
-
-	if (vm->page_directory != NULL) {
-		return 0;
-	}
-
-retry:
-	pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
-	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
-			     &vm->page_directory, pd_size,
-			     RADEON_GPU_PAGE_SIZE, false);
-	if (r == -ENOMEM) {
-		r = radeon_vm_evict(rdev, vm);
-		if (r)
-			return r;
-		goto retry;
-
-	} else if (r) {
-		return r;
-	}
-
-	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
-
-	/* Initially clear the page directory */
-	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
-	memset(pd_addr, 0, pd_size);
-
-	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
-	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
-
-	if (vm->page_tables == NULL) {
-		DRM_ERROR("Cannot allocate memory for page table array\n");
-		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
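The -ENOMEM handling above is an allocate/evict/retry loop. A minimal standalone sketch of the same pattern, with hypothetical try_alloc()/evict_one() stand-ins for radeon_sa_bo_new() and radeon_vm_evict():

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins: a two-slot "suballocator" that starts full. */
static int capacity = 2, used = 2;

static int try_alloc(void) { return used < capacity ? (used++, 0) : -ENOMEM; }
static int evict_one(void) { return used > 0 ? (used--, 0) : -ENOMEM; }

/* On -ENOMEM, evict the least recently used entry and try again, the way
 * radeon_vm_alloc_pt() evicts another VM's tables before retrying. */
static int alloc_with_eviction(void)
{
	for (;;) {
		int r = try_alloc();
		if (r != -ENOMEM)
			return r;        /* success, or a hard error */
		r = evict_one();
		if (r)
			return r;        /* nothing left to evict */
	}
}

int main(void)
{
	printf("alloc -> %d\n", alloc_with_eviction());
	return 0;
}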
-
-/**
- * radeon_vm_add_to_lru - add VMs page table to LRU list
- *
- * @rdev: radeon_device pointer
- * @vm: vm to add to LRU
- *
- * Add the allocated page table to the LRU list (cayman+).
- *
- * Global mutex must be locked!
- */
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	list_del_init(&vm->list);
-	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-}
-
-/**
- * radeon_vm_grab_id - allocate the next free VMID
- *
- * @rdev: radeon_device pointer
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
- *
- * Global and local mutex must be locked!
- */
-struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
-				       struct radeon_vm *vm, int ring)
-{
-	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
-	unsigned choices[2] = {};
-	unsigned i;
-
-	/* check if the id is still valid */
-	if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
-		return NULL;
-
-	/* we definitely need to flush */
-	radeon_fence_unref(&vm->last_flush);
-
-	/* skip over VMID 0, since it is the system VM */
-	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
-		struct radeon_fence *fence = rdev->vm_manager.active[i];
-
-		if (fence == NULL) {
-			/* found a free one */
-			vm->id = i;
-			return NULL;
-		}
-
-		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
-			best[fence->ring] = fence;
-			choices[fence->ring == ring ? 0 : 1] = i;
-		}
-	}
-
-	for (i = 0; i < 2; ++i) {
-		if (choices[i]) {
-			vm->id = choices[i];
-			return rdev->vm_manager.active[choices[i]];
-		}
-	}
-
-	/* should never happen */
-	BUG();
-	return NULL;
-}
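The selection policy above: any unused VMID is taken immediately; otherwise the id protected by the oldest fence is reused, preferring one whose fence lives on the target ring. A standalone sketch of the core of that policy (ignoring the per-ring preference for brevity; a smaller age stands in for an earlier fence, 0 marks a free slot):

#include <stdio.h>

/* Pick a VMID the way radeon_vm_grab_id() does: a free slot wins outright;
 * otherwise take the slot with the oldest fence. Index 0 is reserved for
 * the system VM, so the scan starts at 1. */
static unsigned pick_vmid(const unsigned age[], unsigned nvm)
{
	unsigned best = 0, i;

	for (i = 1; i < nvm; ++i) {
		if (age[i] == 0)
			return i;             /* free id: nothing to sync to */
		if (best == 0 || age[i] < age[best])
			best = i;             /* remember the oldest fence */
	}
	return best;                          /* caller must wait on its fence */
}

int main(void)
{
	unsigned age[8] = { 0, 7, 3, 9, 4, 6, 2, 8 };
	printf("vmid=%u\n", pick_vmid(age, 8));   /* 6: the oldest fence */
	return 0;
}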
-
-/**
- * radeon_vm_fence - remember fence for vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void radeon_vm_fence(struct radeon_device *rdev,
-		     struct radeon_vm *vm,
-		     struct radeon_fence *fence)
-{
-	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
-	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
-
-	radeon_fence_unref(&vm->fence);
-	vm->fence = radeon_fence_ref(fence);
-}
-
-/**
- * radeon_vm_bo_find - find the bo_va for a specific vm & bo
- *
- * @vm: requested vm
- * @bo: requested buffer object
- *
- * Find @bo inside the requested vm (cayman+).
- * Search inside the @bo's vm list for the requested vm.
- * Returns the found bo_va or NULL if none is found.
- *
- * Object has to be reserved!
- */
-struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
-				       struct radeon_bo *bo)
-{
-	struct radeon_bo_va *bo_va;
-
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->vm == vm) {
-			return bo_va;
-		}
-	}
-	return NULL;
-}
-
-/**
- * radeon_vm_bo_add - add a bo to a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- *
- * Add @bo into the requested vm (cayman+).
- * Add @bo to the list of bos associated with the vm.
- * Returns newly added bo_va or NULL for failure.
- *
- * Object has to be reserved!
- */
-struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
-				      struct radeon_vm *vm,
-				      struct radeon_bo *bo)
-{
-	struct radeon_bo_va *bo_va;
-
-	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
-	if (bo_va == NULL) {
-		return NULL;
-	}
-	bo_va->vm = vm;
-	bo_va->bo = bo;
-	bo_va->soffset = 0;
-	bo_va->eoffset = 0;
-	bo_va->flags = 0;
-	bo_va->valid = false;
-	bo_va->ref_count = 1;
-	INIT_LIST_HEAD(&bo_va->bo_list);
-	INIT_LIST_HEAD(&bo_va->vm_list);
-
-	mutex_lock(&vm->mutex);
-	list_add(&bo_va->vm_list, &vm->va);
-	list_add_tail(&bo_va->bo_list, &bo->va);
-	mutex_unlock(&vm->mutex);
-
-	return bo_va;
-}
-
-/**
- * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
- *
- * @rdev: radeon_device pointer
- * @bo_va: bo_va to store the address
- * @soffset: requested offset of the buffer in the VM address space
- * @flags: attributes of pages (read/write/valid/etc.)
- *
- * Set offset of @bo_va (cayman+).
- * Validate and set the offset requested within the vm address space.
- * Returns 0 for success, error for failure.
- *
- * Object has to be reserved!
- */
-int radeon_vm_bo_set_addr(struct radeon_device *rdev,
-			  struct radeon_bo_va *bo_va,
-			  uint64_t soffset,
-			  uint32_t flags)
-{
-	uint64_t size = radeon_bo_size(bo_va->bo);
-	uint64_t eoffset, last_offset = 0;
-	struct radeon_vm *vm = bo_va->vm;
-	struct radeon_bo_va *tmp;
-	struct list_head *head;
-	unsigned last_pfn;
-
-	if (soffset) {
-		/* make sure object fit at this offset */
-		eoffset = soffset + size;
-		if (soffset >= eoffset) {
-			return -EINVAL;
-		}
-
-		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
-		if (last_pfn > rdev->vm_manager.max_pfn) {
-			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
-				last_pfn, rdev->vm_manager.max_pfn);
-			return -EINVAL;
-		}
-
-	} else {
-		eoffset = last_pfn = 0;
-	}
-
-	mutex_lock(&vm->mutex);
-	head = &vm->va;
-	last_offset = 0;
-	list_for_each_entry(tmp, &vm->va, vm_list) {
-		if (bo_va == tmp) {
-			/* skip over currently modified bo */
-			continue;
-		}
-
-		if (soffset >= last_offset && eoffset <= tmp->soffset) {
-			/* bo can be added before this one */
-			break;
-		}
-		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
-			/* bo and tmp overlap, invalid offset */
-			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
-				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
-				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
-			mutex_unlock(&vm->mutex);
-			return -EINVAL;
-		}
-		last_offset = tmp->eoffset;
-		head = &tmp->vm_list;
-	}
-
-	bo_va->soffset = soffset;
-	bo_va->eoffset = eoffset;
-	bo_va->flags = flags;
-	bo_va->valid = false;
-	list_move(&bo_va->vm_list, head);
-
-	mutex_unlock(&vm->mutex);
-	return 0;
-}
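The conflict test above is the standard half-open interval overlap check. A minimal standalone restatement:

#include <stdbool.h>
#include <stdint.h>

/* Two half-open ranges [s0, e0) and [s1, e1) overlap exactly when each
 * starts before the other ends; this is the same test radeon_vm_bo_set_addr
 * applies against every existing mapping in the va list. */
static bool va_ranges_overlap(uint64_t s0, uint64_t e0,
			      uint64_t s1, uint64_t e1)
{
	return e0 > s1 && s0 < e1;
}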
-
-/**
- * radeon_vm_map_gart - get the physical address of a gart page
- *
- * @rdev: radeon_device pointer
- * @addr: the unmapped addr
- *
- * Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
- */
-uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
-{
-	uint64_t result;
-
-	/* page table offset */
-	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-	/* in case cpu page size != gpu page size */
-	result |= addr & (~PAGE_MASK);
-
-	return result;
-}
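A worked example of the lookup above, with hypothetical addresses (PAGE_SHIFT = 12 is an assumption):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ull << PAGE_SHIFT) - 1))

int main(void)
{
	/* Hypothetical values: the DMA address backing page 42 and a GART
	 * address pointing 0x234 bytes into that page. */
	uint64_t pages_addr_42 = 0x1c0000000ull;
	uint64_t addr = (42ull << PAGE_SHIFT) | 0x234;

	/* Same split radeon_vm_map_gart() performs: page lookup, then the
	 * in-page offset is OR'd back in. */
	uint64_t result = pages_addr_42 | (addr & ~PAGE_MASK);

	printf("0x%llx\n", (unsigned long long)result);   /* 0x1c0000234 */
	return 0;
}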
-
-/**
- * radeon_vm_update_pdes - make sure that page directory is valid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_update_pdes(struct radeon_device *rdev,
-				 struct radeon_vm *vm,
-				 struct radeon_ib *ib,
-				 uint64_t start, uint64_t end)
-{
-	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
-
-	uint64_t last_pde = ~0, last_pt = ~0;
-	unsigned count = 0;
-	uint64_t pt_idx;
-	int r;
-
-	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
-	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
-
-	/* walk over the address space and update the page directory */
-	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
-		uint64_t pde, pt;
-
-		if (vm->page_tables[pt_idx])
-			continue;
-
-retry:
-		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
-				     &vm->page_tables[pt_idx],
-				     RADEON_VM_PTE_COUNT * 8,
-				     RADEON_GPU_PAGE_SIZE, false);
-
-		if (r == -ENOMEM) {
-			r = radeon_vm_evict(rdev, vm);
-			if (r)
-				return r;
-			goto retry;
-		} else if (r) {
-			return r;
-		}
-
-		pde = vm->pd_gpu_addr + pt_idx * 8;
-
-		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
-
-		if (((last_pde + 8 * count) != pde) ||
-		    ((last_pt + incr * count) != pt)) {
-
-			if (count) {
-				radeon_asic_vm_set_page(rdev, ib, last_pde,
-							last_pt, count, incr,
-							RADEON_VM_PAGE_VALID);
-			}
-
-			count = 1;
-			last_pde = pde;
-			last_pt = pt;
-		} else {
-			++count;
-		}
-	}
-
-	if (count) {
-		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
-					incr, RADEON_VM_PAGE_VALID);
-
-	}
-
-	return 0;
-}
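The walk above batches runs of contiguous directory entries so that a single radeon_asic_vm_set_page() call covers each run. The same run-batching idea in miniature, over a plain array of destination slots:

#include <stdio.h>

/* Emit one "command" per run of consecutive slots, the way
 * radeon_vm_update_pdes() coalesces contiguous PDE writes. */
static void emit(unsigned start, unsigned count)
{
	printf("set_page(dst=%u, count=%u)\n", start, count);
}

int main(void)
{
	unsigned dst[] = { 0, 1, 2, 10, 11, 40 };
	unsigned n = sizeof(dst) / sizeof(dst[0]);
	unsigned first = dst[0], count = 1;

	for (unsigned i = 1; i < n; ++i) {
		if (dst[i] == first + count) {
			++count;           /* still contiguous: extend the run */
		} else {
			emit(first, count);
			first = dst[i];    /* discontinuity: start a new run */
			count = 1;
		}
	}
	emit(first, count);                /* flush the final run */
	return 0;
}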
-
-/**
- * radeon_vm_update_ptes - make sure that page tables are valid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @dst: destination address to map to
- * @flags: mapping flags
- *
- * Update the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
-				  struct radeon_vm *vm,
-				  struct radeon_ib *ib,
-				  uint64_t start, uint64_t end,
-				  uint64_t dst, uint32_t flags)
-{
-	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
-
-	uint64_t last_pte = ~0, last_dst = ~0;
-	unsigned count = 0;
-	uint64_t addr;
-
-	start = start / RADEON_GPU_PAGE_SIZE;
-	end = end / RADEON_GPU_PAGE_SIZE;
-
-	/* walk over the address space and update the page tables */
-	for (addr = start; addr < end; ) {
-		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
-		unsigned nptes;
-		uint64_t pte;
-
-		if ((addr & ~mask) == (end & ~mask))
-			nptes = end - addr;
-		else
-			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
-
-		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
-		pte += (addr & mask) * 8;
-
-		if ((last_pte + 8 * count) != pte) {
-
-			if (count) {
-				radeon_asic_vm_set_page(rdev, ib, last_pte,
-							last_dst, count,
-							RADEON_GPU_PAGE_SIZE,
-							flags);
-			}
-
-			count = nptes;
-			last_pte = pte;
-			last_dst = dst;
-		} else {
-			count += nptes;
-		}
-
-		addr += nptes;
-		dst += nptes * RADEON_GPU_PAGE_SIZE;
-	}
-
-	if (count) {
-		radeon_asic_vm_set_page(rdev, ib, last_pte,
-					last_dst, count,
-					RADEON_GPU_PAGE_SIZE, flags);
-	}
-}
-
-/**
- * radeon_vm_bo_update_pte - map a bo into the vm page table
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- * @mem: ttm mem
- *
- * Fill in the page table entries for @bo (cayman+).
- * Returns 0 for success, -EINVAL for failure.
- *
- * Object has to be reserved and global and local mutex must be locked!
- */
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
-			    struct radeon_vm *vm,
-			    struct radeon_bo *bo,
-			    struct ttm_mem_reg *mem)
-{
-	unsigned ridx = rdev->asic->vm.pt_ring_index;
-	struct radeon_ib ib;
-	struct radeon_bo_va *bo_va;
-	unsigned nptes, npdes, ndw;
-	uint64_t addr;
-	int r;
-
-	/* nothing to do if vm isn't bound */
-	if (vm->page_directory == NULL)
-		return 0;
-
-	bo_va = radeon_vm_bo_find(vm, bo);
-	if (bo_va == NULL) {
-		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-		return -EINVAL;
-	}
-
-	if (!bo_va->soffset) {
-		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
-			bo, vm);
-		return -EINVAL;
-	}
-
-	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
-		return 0;
-
-	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
-	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
-	if (mem) {
-		addr = mem->start << PAGE_SHIFT;
-		if (mem->mem_type != TTM_PL_SYSTEM) {
-			bo_va->flags |= RADEON_VM_PAGE_VALID;
-			bo_va->valid = true;
-		}
-		if (mem->mem_type == TTM_PL_TT) {
-			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
-		} else {
-			addr += rdev->vm_manager.vram_base_offset;
-		}
-	} else {
-		addr = 0;
-		bo_va->valid = false;
-	}
-
-	nptes = radeon_bo_ngpu_pages(bo);
-
-	/* assume two extra pdes in case the mapping overlaps the borders */
-	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
-
-	/* padding, etc. */
-	ndw = 64;
-
-	if (RADEON_VM_BLOCK_SIZE > 11)
-		/* reserve space for one header for every 2k dwords */
-		ndw += (nptes >> 11) * 4;
-	else
-		/* reserve space for one header for
-		   every (1 << BLOCK_SIZE) entries */
-		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
-
-	/* reserve space for pte addresses */
-	ndw += nptes * 2;
-
-	/* reserve space for one header for every 2k dwords */
-	ndw += (npdes >> 11) * 4;
-
-	/* reserve space for pde addresses */
-	ndw += npdes * 2;
-
-	/* update too big for an IB */
-	if (ndw > 0xfffff)
-		return -ENOMEM;
-
-	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
-	if (r)
-		return r;
-	ib.length_dw = 0;
-
-	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
-	if (r) {
-		radeon_ib_free(rdev, &ib);
-		return r;
-	}
-
-	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
-			      addr, bo_va->flags);
-
-	radeon_ib_sync_to(&ib, vm->fence);
-	r = radeon_ib_schedule(rdev, &ib, NULL);
-	if (r) {
-		radeon_ib_free(rdev, &ib);
-		return r;
-	}
-	radeon_fence_unref(&vm->fence);
-	vm->fence = radeon_fence_ref(ib.fence);
-	radeon_ib_free(rdev, &ib);
-	radeon_fence_unref(&vm->last_flush);
-
-	return 0;
-}
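A worked instance of the dword budget above (a sketch assuming RADEON_VM_BLOCK_SIZE = 9, i.e. the <= 11 branch):

#include <stdio.h>

#define RADEON_VM_BLOCK_SIZE 9   /* assumed era value */

int main(void)
{
	unsigned nptes = 2048;                                /* an 8 MiB bo  */
	unsigned npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2; /* 4 + 2 = 6    */
	unsigned ndw = 64;                                    /* padding, etc.*/

	ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;  /* pte headers:   16     */
	ndw += nptes * 2;                            /* pte addresses: 4096   */
	ndw += (npdes >> 11) * 4;                    /* pde headers:   0      */
	ndw += npdes * 2;                            /* pde addresses: 12     */

	printf("ndw=%u dwords (%u bytes)\n", ndw, ndw * 4);   /* 4188, 16752 */
	return 0;
}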
-
-/**
- * radeon_vm_bo_rmv - remove a bo from a specific vm
- *
- * @rdev: radeon_device pointer
- * @bo_va: requested bo_va
- *
- * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
- *
- * Object has to be reserved!
- */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-		     struct radeon_bo_va *bo_va)
-{
-	int r = 0;
-
-	mutex_lock(&rdev->vm_manager.lock);
-	mutex_lock(&bo_va->vm->mutex);
-	if (bo_va->soffset) {
-		r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
-	}
-	mutex_unlock(&rdev->vm_manager.lock);
-	list_del(&bo_va->vm_list);
-	mutex_unlock(&bo_va->vm->mutex);
-	list_del(&bo_va->bo_list);
-
-	kfree(bo_va);
-	return r;
-}
-
-/**
- * radeon_vm_bo_invalidate - mark the bo as invalid
- *
- * @rdev: radeon_device pointer
- * @bo: radeon buffer object
- *
- * Mark @bo as invalid (cayman+).
- */
-void radeon_vm_bo_invalidate(struct radeon_device *rdev,
-			     struct radeon_bo *bo)
-{
-	struct radeon_bo_va *bo_va;
-
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		bo_va->valid = false;
-	}
-}
-
-/**
- * radeon_vm_init - initialize a vm instance
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- *
- * Init @vm fields (cayman+).
- */
-void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	vm->id = 0;
-	vm->fence = NULL;
-	mutex_init(&vm->mutex);
-	INIT_LIST_HEAD(&vm->list);
-	INIT_LIST_HEAD(&vm->va);
-}
-
-/**
- * radeon_vm_fini - tear down a vm instance
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- *
- * Tear down @vm (cayman+).
- * Unbind the VM and remove all bos from the vm bo list.
- */
-void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	struct radeon_bo_va *bo_va, *tmp;
-	int r;
-
-	mutex_lock(&rdev->vm_manager.lock);
-	mutex_lock(&vm->mutex);
-	radeon_vm_free_pt(rdev, vm);
-	mutex_unlock(&rdev->vm_manager.lock);
-
-	if (!list_empty(&vm->va)) {
-		dev_err(rdev->dev, "still active bo inside vm\n");
-	}
-	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
-		list_del_init(&bo_va->vm_list);
-		r = radeon_bo_reserve(bo_va->bo, false);
-		if (!r) {
-			list_del_init(&bo_va->bo_list);
-			radeon_bo_unreserve(bo_va->bo);
-			kfree(bo_va);
-		}
-	}
-	radeon_fence_unref(&vm->fence);
-	radeon_fence_unref(&vm->last_flush);
-	mutex_unlock(&vm->mutex);
+
+	radeon_dummy_page_fini(rdev);