157,8 → 157,8 |
// WARN(1, "trying to unbind memory to uninitialized GART !\n"); |
return; |
} |
t = offset / 4096; |
p = t / (PAGE_SIZE / 4096); |
t = offset / RADEON_GPU_PAGE_SIZE; |
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
for (i = 0; i < pages; i++, p++) { |
if (rdev->gart.pages[p]) { |
// pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], |
165,7 → 165,7 |
// PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
rdev->gart.pages[p] = NULL; |
rdev->gart.pages_addr[p] = 0; |
for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { |
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
radeon_gart_set_page(rdev, t, 0); |
} |
} |
191,8 → 191,8 |
DRM_ERROR("trying to bind memory to unitialized GART !\n"); |
return -EINVAL; |
} |
t = offset / 4096; |
p = t / (PAGE_SIZE / 4096); |
t = offset / RADEON_GPU_PAGE_SIZE; |
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
|
for (i = 0; i < pages; i++, p++) { |
/* we need to support large memory configurations */ |
206,10 → 206,10 |
// return -ENOMEM; |
//} |
rdev->gart.pages[p] = pagelist[i]; |
page_base = (uint32_t)rdev->gart.pages_addr[p]; |
for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { |
page_base = rdev->gart.pages_addr[p]; |
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
radeon_gart_set_page(rdev, t, page_base); |
page_base += 4096; |
page_base += RADEON_GPU_PAGE_SIZE; |
} |
} |
mb(); |
228,14 → 228,14 |
if (rdev->gart.pages) { |
return 0; |
} |
/* We need PAGE_SIZE >= 4096 */ |
if (PAGE_SIZE < 4096) { |
/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */ |
if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) { |
DRM_ERROR("Page size is smaller than GPU page size!\n"); |
return -EINVAL; |
} |
/* Compute table size */ |
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; |
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096; |
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE; |
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", |
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); |
/* Allocate pages table */ |