// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 */
/*
 * This file contains entry functions for memory management of the ISP driver.
 */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>	/* for kmap */
#include <linux/io.h>		/* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include "asm/cacheflush.h"

#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;

static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
static bool hmm_initialized;

int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * Since hmm uses 0 (NULL) to indicate an invalid ISP virtual
	 * address, and ISP_VM_START is defined as 0 as well, allocate
	 * one piece of dummy memory up front: it takes the address 0,
	 * so no later hmm_alloc() call can return 0 for a valid
	 * allocation.
	 */
	dummy_ptr = hmm_alloc(1);

	return ret;
}

void hmm_cleanup(void)
{
	if (dummy_ptr == mmgr_EXCEPTION)
		return;

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}

static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
			      void *vmalloc_addr)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check if we are initialized. In the ideal world we wouldn't need
	 * this, but we can tackle it once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get page number from size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Buffer object structure init */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for memory */
	ret = hmm_bo_alloc_pages(bo, type, vmalloc_addr);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Combine the virtual address and pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}

ia_css_ptr hmm_alloc(size_t bytes)
{
	return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL);
}

ia_css_ptr hmm_create_from_vmalloc_buf(size_t bytes, void *vmalloc_addr)
{
	return __hmm_alloc(bytes, HMM_BO_VMALLOC, vmalloc_addr);
}

void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	if (WARN_ON(virt == mmgr_EXCEPTION))
		return;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);
}

static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no page allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}
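/*
 * Per-page walk pattern used by the load path below (and repeated inline
 * in hmm_store() and hmm_set()): the ISP virtual range is split at page
 * boundaries, and each page is temporarily mapped with kmap_local_page(),
 * copied, flushed from the CPU data cache, and unmapped again.
 *
 * Worked example (illustrative only, assuming a 4096-byte PAGE_SIZE):
 * for virt == bo->start + 5000, idx = 5000 >> PAGE_SHIFT = 1 and
 * offset = 5000 - (1 << PAGE_SHIFT) = 904, so the first pass copies at
 * most PAGE_SIZE - offset = 3192 bytes before advancing to the next page.
 */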
/* Read function in ISP memory management */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for next loop */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap_local(src);
	}

	return 0;
}

/* Read function in ISP memory management */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);
		else
			vptr = vptr + (virt - bo->start);

		memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!virt) {
		dev_warn(atomisp_dev, "hmm_load: address is NULL\n");
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev, "hmm_load: data is a NULL argument\n");
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/* Write function in ISP memory management */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	if (!virt) {
		dev_warn(atomisp_dev, "hmm_store: address is NULL\n");
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev, "hmm_store: data is a NULL argument\n");
		return -EINVAL;
	}

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memcpy(vptr, data, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap_local_page(bo->pages[idx]);
		if (!des) {
			dev_err(atomisp_dev,
				"kmap buffer object page failed: pg_idx = %d\n",
				idx);
			return -EINVAL;
		}

		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memcpy(des, src, len);
		src += len;
		clflush_cache_range(des, len);
		kunmap_local(des);
	}

	return 0;
}
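/*
 * Note: hmm_set() below follows the same three-strategy order as
 * hmm_store() above: reuse an existing vmap of the buffer object, else
 * create a temporary cached vmap, else fall back to mapping the pages
 * one at a time with kmap_local_page(). Every path ends with
 * clflush_cache_range(), so the ISP never observes stale cache lines.
 */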
/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memset(vptr, c, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);
		clflush_cache_range(des, len);
		kunmap_local(des);
	}

	return 0;
}

/* Virtual address to physical address conversion */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->pages[idx]) + offset;
}

int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map ISP virtual address into IA virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (ptr)
		return ptr + (virt - bo->start);
	else
		return NULL;
}

/* Flush the memory which is mapped as cached memory through hmm_vmap */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}
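/*
 * Typical call sequence for a driver-private ISP buffer (illustrative
 * sketch only; error handling is omitted, and "data"/"MY_SIZE" are
 * hypothetical caller-side names):
 *
 *	ia_css_ptr addr = hmm_alloc(MY_SIZE);
 *
 *	hmm_store(addr, data, MY_SIZE);		// CPU -> ISP memory
 *	hmm_load(addr, data, MY_SIZE);		// ISP memory -> CPU
 *	hmm_free(addr);
 */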